Diffstat (limited to 'libs/audiographer/audiographer/general')
-rw-r--r--  libs/audiographer/audiographer/general/analyser.h                 |  10
-rw-r--r--  libs/audiographer/audiographer/general/chunker.h                  |  30
-rw-r--r--  libs/audiographer/audiographer/general/deinterleaver.h            |  24
-rw-r--r--  libs/audiographer/audiographer/general/interleaver.h              |  54
-rw-r--r--  libs/audiographer/audiographer/general/loudness_reader.h          |   6
-rw-r--r--  libs/audiographer/audiographer/general/normalizer.h               |   4
-rw-r--r--  libs/audiographer/audiographer/general/peak_reader.h              |   2
-rw-r--r--  libs/audiographer/audiographer/general/sample_format_converter.h  |  10
-rw-r--r--  libs/audiographer/audiographer/general/silence_trimmer.h          | 108
-rw-r--r--  libs/audiographer/audiographer/general/sr_converter.h             |  14
10 files changed, 131 insertions(+), 131 deletions(-)
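
This is a mechanical rename of the sample-count type and of frame*-named identifiers to their sample* equivalents; no logic changes. A minimal sketch of the companion typedef change, assuming the library-local typedef lives in audiographer/types.h (it is not part of this diff):

    // Hypothetical companion change (not shown in this diff): only the name of
    // the count type changes, the representation stays a 64-bit signed count.
    typedef int64_t samplecnt_t;   // previously: typedef int64_t framecnt_t;
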
diff --git a/libs/audiographer/audiographer/general/analyser.h b/libs/audiographer/audiographer/general/analyser.h
index 9bd49b33c8..c912664cf0 100644
--- a/libs/audiographer/audiographer/general/analyser.h
+++ b/libs/audiographer/audiographer/general/analyser.h
@@ -29,7 +29,7 @@ namespace AudioGrapher
class LIBAUDIOGRAPHER_API Analyser : public LoudnessReader
{
public:
- Analyser (float sample_rate, unsigned int channels, framecnt_t bufsize, framecnt_t n_samples);
+ Analyser (float sample_rate, unsigned int channels, samplecnt_t bufsize, samplecnt_t n_samples);
~Analyser ();
void process (ProcessContext<float> const & c);
ARDOUR::ExportAnalysisPtr result ();
@@ -48,10 +48,10 @@ class LIBAUDIOGRAPHER_API Analyser : public LoudnessReader
ARDOUR::ExportAnalysis _result;
- framecnt_t _n_samples;
- framecnt_t _pos;
- framecnt_t _spp;
- framecnt_t _fpp;
+ samplecnt_t _n_samples;
+ samplecnt_t _pos;
+ samplecnt_t _spp;
+ samplecnt_t _fpp;
float* _hann_window;
uint32_t _fft_data_size;
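
For orientation, a usage sketch of the renamed Analyser interface. The constructor, process() and result() are declared in this header; the ProcessContext constructor and the block-size expectations are assumptions on my part.

    #include <algorithm>

    #include "audiographer/general/analyser.h"

    using namespace AudioGrapher;

    // Sketch only: analyse a 48 kHz stereo export, feeding bufsize-sample
    // blocks of interleaved data; all counts are samplecnt_t after this change.
    ARDOUR::ExportAnalysisPtr analyse (float* interleaved, samplecnt_t n_samples)
    {
        const samplecnt_t bufsize = 8192;
        Analyser analyser (48000.f, 2, bufsize, n_samples);

        for (samplecnt_t pos = 0; pos < n_samples; pos += bufsize) {
            samplecnt_t const n = std::min (bufsize, n_samples - pos);
            ProcessContext<float> context (interleaved + pos, n, 2);
            analyser.process (context);
        }
        return analyser.result ();
    }
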
diff --git a/libs/audiographer/audiographer/general/chunker.h b/libs/audiographer/audiographer/general/chunker.h
index 466a333655..28ac79b603 100644
--- a/libs/audiographer/audiographer/general/chunker.h
+++ b/libs/audiographer/audiographer/general/chunker.h
@@ -10,7 +10,7 @@
namespace AudioGrapher
{
-/// A class that chunks process cycles into equal sized frames
+/// A class that chunks process cycles into equal sized samples
template<typename T = DefaultSampleType>
class /*LIBAUDIOGRAPHER_API*/ Chunker
: public ListedSource<T>
@@ -21,7 +21,7 @@ class /*LIBAUDIOGRAPHER_API*/ Chunker
/** Constructs a new Chunker with a constant chunk size.
* \n NOT RT safe
*/
- Chunker (framecnt_t chunk_size)
+ Chunker (samplecnt_t chunk_size)
: chunk_size (chunk_size)
, position (0)
{
@@ -42,29 +42,29 @@ class /*LIBAUDIOGRAPHER_API*/ Chunker
{
check_flags (*this, context);
- framecnt_t frames_left = context.frames();
- framecnt_t input_position = 0;
+ samplecnt_t samples_left = context.samples();
+ samplecnt_t input_position = 0;
- while (position + frames_left >= chunk_size) {
+ while (position + samples_left >= chunk_size) {
// Copy from context to buffer
- framecnt_t const frames_to_copy = chunk_size - position;
- TypeUtils<T>::copy (&context.data()[input_position], &buffer[position], frames_to_copy);
+ samplecnt_t const samples_to_copy = chunk_size - position;
+ TypeUtils<T>::copy (&context.data()[input_position], &buffer[position], samples_to_copy);
// Update counters
position = 0;
- input_position += frames_to_copy;
- frames_left -= frames_to_copy;
+ input_position += samples_to_copy;
+ samples_left -= samples_to_copy;
// Output whole buffer
ProcessContext<T> c_out (context, buffer, chunk_size);
- if (frames_left) { c_out.remove_flag(ProcessContext<T>::EndOfInput); }
+ if (samples_left) { c_out.remove_flag(ProcessContext<T>::EndOfInput); }
ListedSource<T>::output (c_out);
}
- if (frames_left) {
+ if (samples_left) {
// Copy the rest of the data
- TypeUtils<T>::copy (&context.data()[input_position], &buffer[position], frames_left);
- position += frames_left;
+ TypeUtils<T>::copy (&context.data()[input_position], &buffer[position], samples_left);
+ position += samples_left;
}
if (context.has_flag (ProcessContext<T>::EndOfInput) && position > 0) {
@@ -75,8 +75,8 @@ class /*LIBAUDIOGRAPHER_API*/ Chunker
using Sink<T>::process;
private:
- framecnt_t chunk_size;
- framecnt_t position;
+ samplecnt_t chunk_size;
+ samplecnt_t position;
T * buffer;
};
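
A usage sketch of the Chunker after the rename. The chunk_size constructor argument and process() are shown above; ListedSource<T>::add_output() and the ProcessContext constructor are assumed, as they are not part of this diff.

    #include <boost/shared_ptr.hpp>

    #include "audiographer/general/chunker.h"

    using namespace AudioGrapher;

    // Sketch only: split one block of interleaved audio into fixed 4096-sample
    // chunks and forward them to a downstream sink; a partial chunk at the end
    // stays buffered inside the Chunker until more data (or EndOfInput) arrives.
    void chunk_block (boost::shared_ptr<Sink<float> > downstream,
                      float* interleaved, samplecnt_t n_samples)
    {
        Chunker<float> chunker (4096);            // chunk_size is now a samplecnt_t
        chunker.add_output (downstream);          // assumed ListedSource<T> API

        ProcessContext<float> context (interleaved, n_samples, 2);
        chunker.process (context);
    }
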
diff --git a/libs/audiographer/audiographer/general/deinterleaver.h b/libs/audiographer/audiographer/general/deinterleaver.h
index 63b6c95589..491c07fcb6 100644
--- a/libs/audiographer/audiographer/general/deinterleaver.h
+++ b/libs/audiographer/audiographer/general/deinterleaver.h
@@ -26,7 +26,7 @@ class /*LIBAUDIOGRAPHER_API*/ DeInterleaver
/// Constructor. \n RT safe
DeInterleaver()
: channels (0)
- , max_frames (0)
+ , max_samples (0)
, buffer (0)
{}
@@ -35,12 +35,12 @@ class /*LIBAUDIOGRAPHER_API*/ DeInterleaver
typedef boost::shared_ptr<Source<T> > SourcePtr;
/// Inits the deinterleaver. Must be called before using. \n Not RT safe
- void init (unsigned int num_channels, framecnt_t max_frames_per_channel)
+ void init (unsigned int num_channels, samplecnt_t max_samples_per_channel)
{
reset();
channels = num_channels;
- max_frames = max_frames_per_channel;
- buffer = new T[max_frames];
+ max_samples = max_samples_per_channel;
+ buffer = new T[max_samples];
for (unsigned int i = 0; i < channels; ++i) {
outputs.push_back (OutputPtr (new IdentityVertex<T>));
@@ -60,28 +60,28 @@ class /*LIBAUDIOGRAPHER_API*/ DeInterleaver
/// Deinterleaves data and outputs it to the outputs. \n RT safe
void process (ProcessContext<T> const & c)
{
- framecnt_t frames = c.frames();
+ samplecnt_t samples = c.samples();
T const * data = c.data();
- framecnt_t const frames_per_channel = frames / channels;
+ samplecnt_t const samples_per_channel = samples / channels;
if (throw_level (ThrowProcess) && c.channels() != channels) {
throw Exception (*this, "wrong amount of channels given to process()");
}
- if (throw_level (ThrowProcess) && frames_per_channel > max_frames) {
- throw Exception (*this, "too many frames given to process()");
+ if (throw_level (ThrowProcess) && samples_per_channel > max_samples) {
+ throw Exception (*this, "too many samples given to process()");
}
unsigned int channel = 0;
for (typename std::vector<OutputPtr>::iterator it = outputs.begin(); it != outputs.end(); ++it, ++channel) {
if (!*it) { continue; }
- for (unsigned int i = 0; i < frames_per_channel; ++i) {
+ for (unsigned int i = 0; i < samples_per_channel; ++i) {
buffer[i] = data[channel + (channels * i)];
}
- ProcessContext<T> c_out (c, buffer, frames_per_channel, 1);
+ ProcessContext<T> c_out (c, buffer, samples_per_channel, 1);
(*it)->process (c_out);
}
}
@@ -96,12 +96,12 @@ class /*LIBAUDIOGRAPHER_API*/ DeInterleaver
delete [] buffer;
buffer = 0;
channels = 0;
- max_frames = 0;
+ max_samples = 0;
}
std::vector<OutputPtr> outputs;
unsigned int channels;
- framecnt_t max_frames;
+ samplecnt_t max_samples;
T * buffer;
};
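
A sketch of the renamed DeInterleaver interface. init() and process() are shown above; the per-channel output() accessor and the ProcessContext constructor are assumptions.

    #include <boost/shared_ptr.hpp>

    #include "audiographer/general/deinterleaver.h"

    using namespace AudioGrapher;

    // Sketch only: split a stereo interleaved buffer into two mono streams.
    // init() takes the maximum number of samples *per channel*; process()
    // throws if a larger context is passed.
    void split_stereo (DeInterleaver<float>& deinterleaver,
                       boost::shared_ptr<Sink<float> > left,
                       boost::shared_ptr<Sink<float> > right,
                       float* interleaved, samplecnt_t samples_per_channel)
    {
        deinterleaver.init (2, samples_per_channel);
        deinterleaver.output (0)->add_output (left);    // assumed accessor
        deinterleaver.output (1)->add_output (right);   // assumed accessor

        ProcessContext<float> context (interleaved, samples_per_channel * 2, 2);
        deinterleaver.process (context);
    }
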
diff --git a/libs/audiographer/audiographer/general/interleaver.h b/libs/audiographer/audiographer/general/interleaver.h
index c1b5e92cfe..7ea1be1ab8 100644
--- a/libs/audiographer/audiographer/general/interleaver.h
+++ b/libs/audiographer/audiographer/general/interleaver.h
@@ -25,20 +25,20 @@ class /*LIBAUDIOGRAPHER_API*/ Interleaver
/// Constructs an interleaver \n RT safe
Interleaver()
: channels (0)
- , max_frames (0)
+ , max_samples (0)
, buffer (0)
{}
~Interleaver() { reset(); }
/// Inits the interleaver. Must be called before using. \n Not RT safe
- void init (unsigned int num_channels, framecnt_t max_frames_per_channel)
+ void init (unsigned int num_channels, samplecnt_t max_samples_per_channel)
{
reset();
channels = num_channels;
- max_frames = max_frames_per_channel;
+ max_samples = max_samples_per_channel;
- buffer = new T[channels * max_frames];
+ buffer = new T[channels * max_samples];
for (unsigned int i = 0; i < channels; ++i) {
inputs.push_back (InputPtr (new Input (*this, i)));
@@ -63,27 +63,27 @@ class /*LIBAUDIOGRAPHER_API*/ Interleaver
{
public:
Input (Interleaver & parent, unsigned int channel)
- : frames_written (0), parent (parent), channel (channel) {}
+ : samples_written (0), parent (parent), channel (channel) {}
void process (ProcessContext<T> const & c)
{
if (parent.throw_level (ThrowProcess) && c.channels() > 1) {
throw Exception (*this, "Data input has more than on channel");
}
- if (parent.throw_level (ThrowStrict) && frames_written) {
+ if (parent.throw_level (ThrowStrict) && samples_written) {
throw Exception (*this, "Input channels out of sync");
}
- frames_written = c.frames();
+ samples_written = c.samples();
parent.write_channel (c, channel);
}
using Sink<T>::process;
- framecnt_t frames() { return frames_written; }
- void reset() { frames_written = 0; }
+ samplecnt_t samples() { return samples_written; }
+ void reset() { samples_written = 0; }
private:
- framecnt_t frames_written;
+ samplecnt_t samples_written;
Interleaver & parent;
unsigned int channel;
};
@@ -94,7 +94,7 @@ class /*LIBAUDIOGRAPHER_API*/ Interleaver
delete [] buffer;
buffer = 0;
channels = 0;
- max_frames = 0;
+ max_samples = 0;
}
void reset_channels ()
@@ -107,44 +107,44 @@ class /*LIBAUDIOGRAPHER_API*/ Interleaver
void write_channel (ProcessContext<T> const & c, unsigned int channel)
{
- if (throw_level (ThrowProcess) && c.frames() > max_frames) {
+ if (throw_level (ThrowProcess) && c.samples() > max_samples) {
reset_channels();
- throw Exception (*this, "Too many frames given to an input");
+ throw Exception (*this, "Too many samples given to an input");
}
- for (unsigned int i = 0; i < c.frames(); ++i) {
+ for (unsigned int i = 0; i < c.samples(); ++i) {
buffer[channel + (channels * i)] = c.data()[i];
}
- framecnt_t const ready_frames = ready_to_output();
- if (ready_frames) {
- ProcessContext<T> c_out (c, buffer, ready_frames, channels);
+ samplecnt_t const ready_samples = ready_to_output();
+ if (ready_samples) {
+ ProcessContext<T> c_out (c, buffer, ready_samples, channels);
ListedSource<T>::output (c_out);
reset_channels ();
}
}
- framecnt_t ready_to_output()
+ samplecnt_t ready_to_output()
{
- framecnt_t ready_frames = inputs[0]->frames();
- if (!ready_frames) { return 0; }
+ samplecnt_t ready_samples = inputs[0]->samples();
+ if (!ready_samples) { return 0; }
for (unsigned int i = 1; i < channels; ++i) {
- framecnt_t const frames = inputs[i]->frames();
- if (!frames) { return 0; }
- if (throw_level (ThrowProcess) && frames != ready_frames) {
- init (channels, max_frames);
- throw Exception (*this, "Frames count out of sync");
+ samplecnt_t const samples = inputs[i]->samples();
+ if (!samples) { return 0; }
+ if (throw_level (ThrowProcess) && samples != ready_samples) {
+ init (channels, max_samples);
+ throw Exception (*this, "Samples count out of sync");
}
}
- return ready_frames * channels;
+ return ready_samples * channels;
}
typedef boost::shared_ptr<Input> InputPtr;
std::vector<InputPtr> inputs;
unsigned int channels;
- framecnt_t max_frames;
+ samplecnt_t max_samples;
T * buffer;
};
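
The mirror-image sketch for the Interleaver. init() and the per-input synchronisation checks are from this diff; the input() accessor and the ProcessContext constructor are assumed.

    #include "audiographer/general/interleaver.h"

    using namespace AudioGrapher;

    // Sketch only: set up and run one cycle merging two mono buffers of equal
    // length into one interleaved stream. All inputs must deliver the same
    // sample count per cycle, otherwise ready_to_output() throws
    // "Samples count out of sync".
    void merge_stereo (Interleaver<float>& interleaver,
                       float* left, float* right, samplecnt_t samples_per_channel)
    {
        interleaver.init (2, samples_per_channel);

        ProcessContext<float> l (left,  samples_per_channel, 1);
        ProcessContext<float> r (right, samples_per_channel, 1);

        interleaver.input (0)->process (l);   // assumed accessor
        interleaver.input (1)->process (r);   // interleaved output fires here
    }
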
diff --git a/libs/audiographer/audiographer/general/loudness_reader.h b/libs/audiographer/audiographer/general/loudness_reader.h
index c86fd521da..8c9ccb4149 100644
--- a/libs/audiographer/audiographer/general/loudness_reader.h
+++ b/libs/audiographer/audiographer/general/loudness_reader.h
@@ -32,7 +32,7 @@ namespace AudioGrapher
class LIBAUDIOGRAPHER_API LoudnessReader : public ListedSource<float>, public Sink<float>
{
public:
- LoudnessReader (float sample_rate, unsigned int channels, framecnt_t bufsize);
+ LoudnessReader (float sample_rate, unsigned int channels, samplecnt_t bufsize);
~LoudnessReader ();
void reset ();
@@ -52,8 +52,8 @@ class LIBAUDIOGRAPHER_API LoudnessReader : public ListedSource<float>, public Si
float _sample_rate;
unsigned int _channels;
- framecnt_t _bufsize;
- framecnt_t _pos;
+ samplecnt_t _bufsize;
+ samplecnt_t _pos;
float* _bufs[2];
};
diff --git a/libs/audiographer/audiographer/general/normalizer.h b/libs/audiographer/audiographer/general/normalizer.h
index e5f73a0f08..cd4e375db2 100644
--- a/libs/audiographer/audiographer/general/normalizer.h
+++ b/libs/audiographer/audiographer/general/normalizer.h
@@ -28,7 +28,7 @@ public:
* non-const ProcessContexts are given to \a process() .
* \n Not RT safe
*/
- void alloc_buffer(framecnt_t frames);
+ void alloc_buffer(samplecnt_t samples);
/// Process a const ProcessContext \see alloc_buffer() \n RT safe
void process (ProcessContext<float> const & c);
@@ -42,7 +42,7 @@ private:
float gain;
float * buffer;
- framecnt_t buffer_size;
+ samplecnt_t buffer_size;
};
diff --git a/libs/audiographer/audiographer/general/peak_reader.h b/libs/audiographer/audiographer/general/peak_reader.h
index 8bf0faa792..bb04239544 100644
--- a/libs/audiographer/audiographer/general/peak_reader.h
+++ b/libs/audiographer/audiographer/general/peak_reader.h
@@ -25,7 +25,7 @@ class /*LIBAUDIOGRAPHER_API*/ PeakReader : public ListedSource<float>, public Si
/// Finds peaks from the data \n RT safe
void process (ProcessContext<float> const & c)
{
- peak = Routines::compute_peak (c.data(), c.frames(), peak);
+ peak = Routines::compute_peak (c.data(), c.samples(), peak);
ListedSource<float>::output(c);
}
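
A sketch of the PeakReader, which measures the data and passes it through unchanged; process() is shown above, the read_peak() accessor and the ProcessContext constructor are assumptions.

    #include "audiographer/general/peak_reader.h"

    using namespace AudioGrapher;

    // Sketch only: run a buffer through the peak reader and fetch the running
    // absolute peak; the data itself is forwarded to any attached outputs.
    float measure_peak (PeakReader& reader, float* data, samplecnt_t n_samples)
    {
        ProcessContext<float> context (data, n_samples, 1);
        reader.process (context);
        return reader.read_peak ();   // assumed accessor name
    }
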
diff --git a/libs/audiographer/audiographer/general/sample_format_converter.h b/libs/audiographer/audiographer/general/sample_format_converter.h
index 96dd6aa72a..9a79fc927c 100644
--- a/libs/audiographer/audiographer/general/sample_format_converter.h
+++ b/libs/audiographer/audiographer/general/sample_format_converter.h
@@ -35,13 +35,13 @@ class LIBAUDIOGRAPHER_API SampleFormatConverter
~SampleFormatConverter ();
/** Initialize and allocate buffers for processing.
- * \param max_frames maximum number of frames that is allowed to be used in calls to \a process()
+ * \param max_samples maximum number of samples that is allowed to be used in calls to \a process()
* \param type dither type from \a DitherType
* \param data_width data with in bits
* \note If the non-const version of process() is used with floats,
* there is no need to call this function.
*/
- void init (framecnt_t max_frames, int type, int data_width);
+ void init (samplecnt_t max_samples, int type, int data_width);
/// Set whether or not clipping to [-1.0, 1.0] should occur when TOut = float. Clipping is off by default
void set_clip_floats (bool yn) { clip_floats = yn; }
@@ -54,12 +54,12 @@ class LIBAUDIOGRAPHER_API SampleFormatConverter
private:
void reset();
- void init_common (framecnt_t max_frames); // not-template-specialized part of init
- void check_frame_and_channel_count (framecnt_t frames, ChannelCount channels_);
+ void init_common (samplecnt_t max_samples); // not-template-specialized part of init
+ void check_sample_and_channel_count (samplecnt_t samples, ChannelCount channels_);
ChannelCount channels;
GDither dither;
- framecnt_t data_out_size;
+ samplecnt_t data_out_size;
TOut * data_out;
bool clip_floats;
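
A sketch of the renamed SampleFormatConverter::init(). The init() signature is taken from this diff; the channel-count constructor and the GDitherTri constant are assumptions.

    #include "audiographer/general/sample_format_converter.h"

    using namespace AudioGrapher;

    // Sketch only: prepare a float -> 16-bit integer converter with triangular
    // dither for stereo material; max_samples bounds later process() calls.
    void setup_converter (samplecnt_t max_samples_per_cycle)
    {
        SampleFormatConverter<int16_t> converter (2);            // assumed ctor
        converter.init (max_samples_per_cycle, GDitherTri, 16);  // assumed dither constant
    }
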
diff --git a/libs/audiographer/audiographer/general/silence_trimmer.h b/libs/audiographer/audiographer/general/silence_trimmer.h
index 7cfa0658f1..e09c348c52 100644
--- a/libs/audiographer/audiographer/general/silence_trimmer.h
+++ b/libs/audiographer/audiographer/general/silence_trimmer.h
@@ -30,7 +30,7 @@ struct SilenceTester<float> {
};
-/// Removes and adds silent frames to beginning and/or end of stream
+/// Removes and adds silent samples to beginning and/or end of stream
template<typename T = DefaultSampleType>
class /*LIBAUDIOGRAPHER_API*/ SilenceTrimmer
: public ListedSource<T>
@@ -41,7 +41,7 @@ class /*LIBAUDIOGRAPHER_API*/ SilenceTrimmer
public:
/// Constructor, \see reset() \n Not RT safe
- SilenceTrimmer(framecnt_t silence_buffer_size_ = 1024, float thresh_dB = -INFINITY)
+ SilenceTrimmer(samplecnt_t silence_buffer_size_ = 1024, float thresh_dB = -INFINITY)
: silence_buffer_size (0)
, silence_buffer (0)
, tester (thresh_dB)
@@ -60,7 +60,7 @@ class /*LIBAUDIOGRAPHER_API*/ SilenceTrimmer
* This also defines the maximum length of output process context
* which can be output during long intermediate silence.
*/
- void reset (framecnt_t silence_buffer_size_ = 1024)
+ void reset (samplecnt_t silence_buffer_size_ = 1024)
{
if (throw_level (ThrowObject) && silence_buffer_size_ == 0) {
throw Exception (*this,
@@ -78,34 +78,34 @@ class /*LIBAUDIOGRAPHER_API*/ SilenceTrimmer
processing_finished = false;
trim_beginning = false;
trim_end = false;
- silence_frames = 0;
+ silence_samples = 0;
max_output_frames = 0;
add_to_beginning = 0;
add_to_end = 0;
}
- /** Tells that \a frames_per_channel frames of silence per channel should be added to beginning
+ /** Tells that \a samples_per_channel samples of silence per channel should be added to beginning
* Needs to be called before starting processing.
* \n RT safe
*/
- void add_silence_to_beginning (framecnt_t frames_per_channel)
+ void add_silence_to_beginning (samplecnt_t samples_per_channel)
{
if (throw_level (ThrowObject) && processed_data) {
throw Exception(*this, "Tried to add silence to beginning after processing started");
}
- add_to_beginning = frames_per_channel;
+ add_to_beginning = samples_per_channel;
}
- /** Tells that \a frames_per_channel frames of silence per channel should be added to end
+ /** Tells that \a samples_per_channel samples of silence per channel should be added to end
* Needs to be called before end is reached.
* \n RT safe
*/
- void add_silence_to_end (framecnt_t frames_per_channel)
+ void add_silence_to_end (samplecnt_t samples_per_channel)
{
if (throw_level (ThrowObject) && processed_data) {
throw Exception(*this, "Tried to add silence to end after processing started");
}
- add_to_end = frames_per_channel;
+ add_to_end = samples_per_channel;
}
/** Tells whether ot nor silence should be trimmed from the beginning
@@ -166,17 +166,17 @@ class /*LIBAUDIOGRAPHER_API*/ SilenceTrimmer
* may lend itself for some inspiration.
*/
- framecnt_t output_start_index = 0;
- framecnt_t output_sample_count = c.frames();
+ samplecnt_t output_start_index = 0;
+ samplecnt_t output_sample_count = c.samples();
if (!processed_data) {
if (trim_beginning) {
- framecnt_t first_non_silent_frame_index = 0;
- if (find_first_non_silent_frame (c, first_non_silent_frame_index)) {
+ samplecnt_t first_non_silent_sample_index = 0;
+ if (find_first_non_silent_sample (c, first_non_silent_sample_index)) {
// output from start of non-silent data until end of buffer
// output_sample_count may also be altered in trim end
- output_start_index = first_non_silent_frame_index;
- output_sample_count = c.frames() - first_non_silent_frame_index;
+ output_start_index = first_non_silent_sample_index;
+ output_sample_count = c.samples() - first_non_silent_sample_index;
processed_data = true;
} else {
// keep entering this block until non-silence is found to trim
@@ -189,35 +189,35 @@ class /*LIBAUDIOGRAPHER_API*/ SilenceTrimmer
// This block won't be called again so add silence to beginning
if (processed_data && add_to_beginning) {
add_to_beginning *= c.channels ();
- output_silence_frames (c, add_to_beginning);
+ output_silence_samples (c, add_to_beginning);
}
}
if (processed_data) {
if (trim_end) {
- framecnt_t first_non_silent_frame_index = 0;
- if (find_first_non_silent_frame (c, first_non_silent_frame_index)) {
+ samplecnt_t first_non_silent_sample_index = 0;
+ if (find_first_non_silent_sample (c, first_non_silent_sample_index)) {
// context buffer contains non-silent data, flush any intermediate silence
- output_silence_frames (c, silence_frames);
+ output_silence_samples (c, silence_samples);
- framecnt_t silent_frame_index = 0;
- find_last_silent_frame_reverse (c, silent_frame_index);
+ samplecnt_t silent_sample_index = 0;
+ find_last_silent_sample_reverse (c, silent_sample_index);
// Count of samples at end of block that are "silent", may be zero.
- framecnt_t silent_end_samples = c.frames () - silent_frame_index;
- framecnt_t samples_before_silence = c.frames() - silent_end_samples;
+ samplecnt_t silent_end_samples = c.samples () - silent_sample_index;
+ samplecnt_t samples_before_silence = c.samples() - silent_end_samples;
- assert (samples_before_silence + silent_end_samples == c.frames ());
+ assert (samples_before_silence + silent_end_samples == c.samples ());
// output_start_index may be non-zero if start trim occurred above
output_sample_count = samples_before_silence - output_start_index;
// keep track of any silent samples not output
- silence_frames = silent_end_samples;
+ silence_samples = silent_end_samples;
} else {
// whole context buffer is silent output nothing
- silence_frames += c.frames ();
+ silence_samples += c.samples ();
output_sample_count = 0;
}
}
@@ -230,7 +230,7 @@ class /*LIBAUDIOGRAPHER_API*/ SilenceTrimmer
// Finally, if in last process call, add silence to end
if (processing_finished && processed_data && add_to_end) {
add_to_end *= c.channels();
- output_silence_frames (c, add_to_end);
+ output_silence_samples (c, add_to_end);
}
if (processing_finished) {
@@ -250,13 +250,13 @@ class /*LIBAUDIOGRAPHER_API*/ SilenceTrimmer
private:
- bool find_first_non_silent_frame (ProcessContext<T> const & c, framecnt_t & result_frame)
+ bool find_first_non_silent_sample (ProcessContext<T> const & c, samplecnt_t & result_sample)
{
- for (framecnt_t i = 0; i < c.frames(); ++i) {
+ for (samplecnt_t i = 0; i < c.samples(); ++i) {
if (!tester.is_silent (c.data()[i])) {
- result_frame = i;
+ result_sample = i;
// Round down to nearest interleaved "frame" beginning
- result_frame -= result_frame % c.channels();
+ result_sample -= result_sample % c.channels();
return true;
}
}
@@ -264,43 +264,43 @@ private:
}
/**
- * Reverse find the last silent frame index. If the last sample in the
+ * Reverse find the last silent sample index. If the last sample in the
* buffer is non-silent the index will be one past the end of the buffer and
- * equal to c.frames(). e.g silent_end_samples = c.frames() - result_frame
+ * equal to c.samples(). e.g silent_end_samples = c.samples() - result_sample
*
- * @return true if result_frame index is valid, false if there were only
+ * @return true if result_sample index is valid, false if there were only
* silent samples in the context buffer
*/
- bool find_last_silent_frame_reverse (ProcessContext<T> const & c, framecnt_t & result_frame)
+ bool find_last_silent_sample_reverse (ProcessContext<T> const & c, samplecnt_t & result_sample)
{
- framecnt_t last_sample_index = c.frames() - 1;
+ samplecnt_t last_sample_index = c.samples() - 1;
- for (framecnt_t i = last_sample_index; i >= 0; --i) {
+ for (samplecnt_t i = last_sample_index; i >= 0; --i) {
if (!tester.is_silent (c.data()[i])) {
- result_frame = i;
+ result_sample = i;
// Round down to nearest interleaved "frame" beginning
- result_frame -= result_frame % c.channels();
- // Round up to return the "last" silent interleaved frame
- result_frame += c.channels();
+ result_sample -= result_sample % c.channels();
+ // Round up to return the "last" silent interleaved sample
+ result_sample += c.channels();
return true;
}
}
return false;
}
- void output_silence_frames (ProcessContext<T> const & c, framecnt_t & total_frames)
+ void output_silence_samples (ProcessContext<T> const & c, samplecnt_t & total_samples)
{
assert (!c.has_flag (ProcessContext<T>::EndOfInput));
- while (total_frames > 0) {
- framecnt_t frames = std::min (silence_buffer_size, total_frames);
+ while (total_samples > 0) {
+ samplecnt_t samples = std::min (silence_buffer_size, total_samples);
if (max_output_frames) {
- frames = std::min (frames, max_output_frames);
+ samples = std::min (samples, max_output_frames);
}
- frames -= frames % c.channels();
+ samples -= samples % c.channels();
- total_frames -= frames;
- ConstProcessContext<T> c_out (c, silence_buffer, frames);
+ total_samples -= samples;
+ ConstProcessContext<T> c_out (c, silence_buffer, samples);
ListedSource<T>::output (c_out);
}
}
@@ -311,13 +311,13 @@ private:
bool trim_beginning;
bool trim_end;
- framecnt_t silence_frames;
- framecnt_t max_output_frames;
+ samplecnt_t silence_samples;
+ samplecnt_t max_output_frames;
- framecnt_t add_to_beginning;
- framecnt_t add_to_end;
+ samplecnt_t add_to_beginning;
+ samplecnt_t add_to_end;
- framecnt_t silence_buffer_size;
+ samplecnt_t silence_buffer_size;
T * silence_buffer;
SilenceTester<T> tester;
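
A configuration sketch for the SilenceTrimmer. The constructor and add_silence_to_*() are shown above; the set_trim_beginning()/set_trim_end() setters are assumed from the surrounding doxygen comments.

    #include "audiographer/general/silence_trimmer.h"

    using namespace AudioGrapher;

    // Sketch only: trim silence below -90 dBFS from both ends of an export and
    // pad one second (at 48 kHz) of real silence on each side. All counts are
    // samplecnt_t per channel.
    void configure_trimmer ()
    {
        SilenceTrimmer<float> trimmer (1024, -90.f);   // buffer size, threshold
        trimmer.set_trim_beginning (true);             // assumed setter
        trimmer.set_trim_end (true);                   // assumed setter
        trimmer.add_silence_to_beginning (48000);
        trimmer.add_silence_to_end (48000);
    }
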
diff --git a/libs/audiographer/audiographer/general/sr_converter.h b/libs/audiographer/audiographer/general/sr_converter.h
index a2e94d9bc0..8edc4b8c99 100644
--- a/libs/audiographer/audiographer/general/sr_converter.h
+++ b/libs/audiographer/audiographer/general/sr_converter.h
@@ -26,10 +26,10 @@ class LIBAUDIOGRAPHER_API SampleRateConverter
~SampleRateConverter ();
/// Init converter \n Not RT safe
- void init (framecnt_t in_rate, framecnt_t out_rate, int quality = 0);
+ void init (samplecnt_t in_rate, samplecnt_t out_rate, int quality = 0);
- /// Returns max amount of frames that will be output \n RT safe
- framecnt_t allocate_buffers (framecnt_t max_frames);
+ /// Returns max amount of samples that will be output \n RT safe
+ samplecnt_t allocate_buffers (samplecnt_t max_samples);
/** Does sample rate conversion.
* Note that outpt size may vary a lot.
@@ -47,14 +47,14 @@ class LIBAUDIOGRAPHER_API SampleRateConverter
bool active;
uint32_t channels;
- framecnt_t max_frames_in;
+ samplecnt_t max_samples_in;
float * leftover_data;
- framecnt_t leftover_frames;
- framecnt_t max_leftover_frames;
+ samplecnt_t leftover_samples;
+ samplecnt_t max_leftover_samples;
float * data_out;
- framecnt_t data_out_size;
+ samplecnt_t data_out_size;
SRC_DATA src_data;
SRC_STATE* src_state;
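
Finally, a setup sketch for the renamed SampleRateConverter. init() and allocate_buffers() are taken from this diff; allocate_buffers() reports the worst-case output size so callers can size downstream buffers accordingly.

    #include "audiographer/general/sr_converter.h"

    using namespace AudioGrapher;

    // Sketch only: resample 96 kHz material to 44.1 kHz; the quality argument
    // defaults to 0 (see the declaration above).
    samplecnt_t setup_resampler (SampleRateConverter& converter,
                                 samplecnt_t max_samples_per_cycle)
    {
        converter.init (96000, 44100);
        return converter.allocate_buffers (max_samples_per_cycle);
    }
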