/*
     File: AUEffectBase.cpp
 Abstract: AUEffectBase.h
  Version: 1.1

 Disclaimer: IMPORTANT:  This Apple software is supplied to you by Apple
 Inc. ("Apple") in consideration of your agreement to the following
 terms, and your use, installation, modification or redistribution of
 this Apple software constitutes acceptance of these terms.  If you do
 not agree with these terms, please do not use, install, modify or
 redistribute this Apple software.

 In consideration of your agreement to abide by the following terms, and
 subject to these terms, Apple grants you a personal, non-exclusive
 license, under Apple's copyrights in this original Apple software (the
 "Apple Software"), to use, reproduce, modify and redistribute the Apple
 Software, with or without modifications, in source and/or binary forms;
 provided that if you redistribute the Apple Software in its entirety and
 without modifications, you must retain this notice and the following
 text and disclaimers in all such redistributions of the Apple Software.
 Neither the name, trademarks, service marks or logos of Apple Inc. may
 be used to endorse or promote products derived from the Apple Software
 without specific prior written permission from Apple.  Except as
 expressly stated in this notice, no other rights or licenses, express or
 implied, are granted by Apple herein, including but not limited to any
 patent rights that may be infringed by your derivative works or by other
 works in which the Apple Software may be incorporated.

 The Apple Software is provided by Apple on an "AS IS" basis.  APPLE
 MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
 THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
 FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
 OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.

 IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
 OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
 MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
 AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
 STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
 POSSIBILITY OF SUCH DAMAGE.

 Copyright (C) 2014 Apple Inc. All Rights Reserved.

*/
#include "AUEffectBase.h"

/*
	This class does not deal as well as it should with N-M effects...

	The problem areas (when the input and output channel counts don't match) are:
		ProcessInPlace - there will be problems if InputChan != OutputChan
		Bypass - it just passes the buffers through when not processing them

	This will be fixed in a future update...
*/

//_____________________________________________________________________________
//
AUEffectBase::AUEffectBase(	AudioComponentInstance	audioUnit,
							bool					inProcessesInPlace ) :
	AUBase(audioUnit, 1, 1),		// 1 in bus, 1 out bus
	mBypassEffect(false),
	mParamSRDep (false),
	mProcessesInPlace(inProcessesInPlace),
	mMainOutput(NULL), mMainInput(NULL)
#if TARGET_OS_IPHONE
	, mOnlyOneKernel(false)
#endif
{
}

//_____________________________________________________________________________
//
AUEffectBase::~AUEffectBase()
{
	Cleanup();
}

//_____________________________________________________________________________
//
void AUEffectBase::Cleanup()
{
	for (KernelList::iterator it = mKernelList.begin(); it != mKernelList.end(); ++it)
		delete *it;

	mKernelList.clear();
	mMainOutput = NULL;
	mMainInput = NULL;
}


//_____________________________________________________________________________
//
OSStatus AUEffectBase::Initialize()
{
		// get our current numChannels for input and output
	SInt16 auNumInputs = (SInt16) GetInput(0)->GetStreamFormat().mChannelsPerFrame;
	SInt16 auNumOutputs = (SInt16) GetOutput(0)->GetStreamFormat().mChannelsPerFrame;

		// does the unit publish specific information about channel configurations?
    const AUChannelInfo *auChannelConfigs = NULL;
    UInt32 numIOconfigs = SupportedNumChannels(&auChannelConfigs);

    if ((numIOconfigs > 0) && (auChannelConfigs != NULL))
    {
        bool foundMatch = false;
        for (UInt32 i = 0; (i < numIOconfigs) && !foundMatch; ++i)
        {
            SInt16 configNumInputs = auChannelConfigs[i].inChannels;
            SInt16 configNumOutputs = auChannelConfigs[i].outChannels;
            if ((configNumInputs < 0) && (configNumOutputs < 0))
            {
					// -1 on input and -2 on output (or vice versa): unit accepts any number of
					// channels on input and output, and they need not match
                if (((configNumInputs == -1) && (configNumOutputs == -2))
					|| ((configNumInputs == -2) && (configNumOutputs == -1)))
                {
				    foundMatch = true;
                }
					// -1 on both scopes: unit accepts any number of channels on input and output
					// IFF they are the same number on both scopes
				else if (((configNumInputs == -1) && (configNumOutputs == -1)) && (auNumInputs == auNumOutputs))
                {
				    foundMatch = true;
                }
				else
                    continue;
            }
            else
            {
					// unit has specified a particular number of channels on at least one scope;
					// the -1 case on either scope is saying that the unit doesn't care about the
					// number of channels on that scope
                bool inputMatch = (auNumInputs == configNumInputs) || (configNumInputs == -1);
                bool outputMatch = (auNumOutputs == configNumOutputs) || (configNumOutputs == -1);
                if (inputMatch && outputMatch)
                    foundMatch = true;
            }
        }
        if (!foundMatch)
            return kAudioUnitErr_FormatNotSupported;
    }
    else
    {
			// there is no specifically published channel info
			// so for those kinds of effects, the assumption is that the channels (whatever their number)
			// should match on both scopes
        if ((auNumOutputs != auNumInputs) || (auNumOutputs == 0))
		{
		    return kAudioUnitErr_FormatNotSupported;
		}
    }

    MaintainKernels();

	mMainOutput = GetOutput(0);
	mMainInput = GetInput(0);

	const CAStreamBasicDescription& format = GetStreamFormat(kAudioUnitScope_Output, 0);
	format.IdentifyCommonPCMFormat(mCommonPCMFormat, NULL);
	mBytesPerFrame = format.mBytesPerFrame;

    return noErr;
}
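
//_____________________________________________________________________________
//
//	Illustrative sketch (not part of the original SDK source): Initialize() above
//	validates the current stream formats against whatever channel configurations
//	a subclass publishes by overriding SupportedNumChannels() (declared in AUBase).
//	A hypothetical subclass ("MyEffect") could publish mono->mono, stereo->stereo,
//	and "any matching channel count" (-1, -1) like this:
//
#if 0
UInt32	MyEffect::SupportedNumChannels (const AUChannelInfo** outInfo)
{
	// {inChannels, outChannels} pairs; -1 means "any number of channels"
	static const AUChannelInfo sChannelInfo[] = { {1, 1}, {2, 2}, {-1, -1} };
	if (outInfo != NULL)
		*outInfo = sChannelInfo;
	return sizeof(sChannelInfo) / sizeof(AUChannelInfo);
}
#endif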

OSStatus			AUEffectBase::Reset(		AudioUnitScope 		inScope,
								 				AudioUnitElement 	inElement)
{
	for (KernelList::iterator it = mKernelList.begin(); it != mKernelList.end(); ++it) {
		AUKernelBase *kernel = *it;
		if (kernel != NULL)
			kernel->Reset();
	}

	return AUBase::Reset(inScope, inElement);
}

OSStatus			AUEffectBase::GetPropertyInfo (AudioUnitPropertyID	inID,
												AudioUnitScope					inScope,
												AudioUnitElement				inElement,
												UInt32 &						outDataSize,
												Boolean &						outWritable)
{
	if (inScope == kAudioUnitScope_Global) {
		switch (inID) {
			case kAudioUnitProperty_BypassEffect:
				outWritable = true;
				outDataSize = sizeof (UInt32);
				return noErr;
			case kAudioUnitProperty_InPlaceProcessing:
				outWritable = true;
				outDataSize = sizeof (UInt32);
				return noErr;
		}
	}
	return AUBase::GetPropertyInfo (inID, inScope, inElement, outDataSize, outWritable);
}


OSStatus			AUEffectBase::GetProperty (AudioUnitPropertyID 		inID,
									  AudioUnitScope 					inScope,
									  AudioUnitElement			 		inElement,
									  void *							outData)
{
	if (inScope == kAudioUnitScope_Global) {
		switch (inID) {
			case kAudioUnitProperty_BypassEffect:
				*((UInt32*)outData) = (IsBypassEffect() ? 1 : 0);
				return noErr;
			case kAudioUnitProperty_InPlaceProcessing:
				*((UInt32*)outData) = (mProcessesInPlace ? 1 : 0);
				return noErr;
		}
	}
	return AUBase::GetProperty (inID, inScope, inElement, outData);
}


OSStatus			AUEffectBase::SetProperty(		AudioUnitPropertyID inID,
									   AudioUnitScope 		inScope,
									   AudioUnitElement 	inElement,
									   const void *			inData,
									   UInt32 				inDataSize)
{
	if (inScope == kAudioUnitScope_Global) {
		switch (inID) {
			case kAudioUnitProperty_BypassEffect:
			{
				if (inDataSize < sizeof(UInt32))
					return kAudioUnitErr_InvalidPropertyValue;

				bool tempNewSetting = *((UInt32*)inData) != 0;
					// we're changing the state of bypass
				if (tempNewSetting != IsBypassEffect())
				{
					if (!tempNewSetting && IsBypassEffect() && IsInitialized()) // turning bypass off and we're initialized
						Reset(0, 0);
					SetBypassEffect (tempNewSetting);
				}
				return noErr;
			}
			case kAudioUnitProperty_InPlaceProcessing:
				mProcessesInPlace = (*((UInt32*)inData) != 0);
				return noErr;
		}
	}
	return AUBase::SetProperty (inID, inScope, inElement, inData, inDataSize);
}
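
//_____________________________________________________________________________
//
//	Illustrative sketch (not part of the original SDK source): the bypass and
//	in-place properties handled above are reached from the host side through the
//	plain C API, for example ("unit" stands in for an AudioUnit the host has
//	already opened):
//
#if 0
static OSStatus SetEffectBypassed (AudioUnit unit, bool shouldBypass)
{
	UInt32 flag = shouldBypass ? 1 : 0;
	return AudioUnitSetProperty (unit,
								 kAudioUnitProperty_BypassEffect,
								 kAudioUnitScope_Global,
								 0,				// element
								 &flag,
								 sizeof (flag));
}
#endif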


void	AUEffectBase::MaintainKernels()
{
#if TARGET_OS_IPHONE
	UInt32 nKernels = mOnlyOneKernel ? 1 : GetNumberOfChannels();
#else
	UInt32 nKernels = GetNumberOfChannels();
#endif

	if (mKernelList.size() < nKernels) {
		mKernelList.reserve(nKernels);
		for (UInt32 i = (UInt32)mKernelList.size(); i < nKernels; ++i)
			mKernelList.push_back(NewKernel());
	} else {
		while (mKernelList.size() > nKernels) {
			AUKernelBase *kernel = mKernelList.back();
			delete kernel;
			mKernelList.pop_back();
		}
	}

	for(unsigned int i = 0; i < nKernels; i++ )
	{
		if(mKernelList[i]) {
			mKernelList[i]->SetChannelNum (i);
		}
	}
}
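
//_____________________________________________________________________________
//
//	Illustrative sketch (not part of the original SDK source): MaintainKernels()
//	above asks the subclass for one DSP kernel per channel via NewKernel().  A
//	typical n-to-n effect overrides it along these lines ("MyEffect" and
//	"MyKernel" are hypothetical names; MyKernel would derive from AUKernelBase):
//
#if 0
AUKernelBase *	MyEffect::NewKernel ()
{
	return new MyKernel (this);
}
#endif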

bool		AUEffectBase::StreamFormatWritable(	AudioUnitScope					scope,
												AudioUnitElement				element)
{
	return !IsInitialized();
}

OSStatus			AUEffectBase::ChangeStreamFormat(	AudioUnitScope				inScope,
														AudioUnitElement			inElement,
														const CAStreamBasicDescription & inPrevFormat,
														const CAStreamBasicDescription & inNewFormat)
{
	OSStatus result = AUBase::ChangeStreamFormat(inScope, inElement, inPrevFormat, inNewFormat);
	if (result == noErr)
	{
		// for the moment, the only dependency we know about where a parameter's
		// range may change is the sample rate - and effects only publish
		// parameters in the global scope!
		if (GetParamHasSampleRateDependency() && fnotequal(inPrevFormat.mSampleRate, inNewFormat.mSampleRate))
			PropertyChanged(kAudioUnitProperty_ParameterList, kAudioUnitScope_Global, 0);
	}

	return result;
}
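
//_____________________________________________________________________________
//
//	Illustrative sketch (not part of the original SDK source): a subclass whose
//	parameter ranges depend on the sample rate would call
//	SetParamHasSampleRateDependency(true) (typically in its constructor) so the
//	notification above fires, and derive the range from GetSampleRate() when
//	reporting parameter info.  The class name and parameter ID below are
//	hypothetical, and real code would also fill in the parameter name and flags.
//
#if 0
enum { kMyParam_CutoffHz = 0 };		// hypothetical parameter ID

OSStatus	MyEffect::GetParameterInfo (AudioUnitScope			inScope,
										AudioUnitParameterID	inParameterID,
										AudioUnitParameterInfo	&outParameterInfo)
{
	if (inScope == kAudioUnitScope_Global && inParameterID == kMyParam_CutoffHz) {
		outParameterInfo.unit = kAudioUnitParameterUnit_Hertz;
		outParameterInfo.minValue = 20.0;
		outParameterInfo.maxValue = GetSampleRate() * 0.5;	// Nyquist moves with the sample rate
		outParameterInfo.defaultValue = 1000.0;
		return noErr;
	}
	return kAudioUnitErr_InvalidParameter;
}
#endif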


// ____________________________________________________________________________
//
//	This method is called (potentially repeatedly) by ProcessForScheduledParams()
//	in order to perform the actual DSP required for this portion of the entire buffer
//	being processed.  The entire buffer can be divided up into smaller "slices"
//	according to the timestamps on the scheduled parameters...
//
OSStatus		AUEffectBase::ProcessScheduledSlice(	void				*inUserData,
														UInt32				inStartFrameInBuffer,
														UInt32				inSliceFramesToProcess,
														UInt32				inTotalBufferFrames )
{
	ScheduledProcessParams	&sliceParams = *((ScheduledProcessParams*)inUserData);

	AudioUnitRenderActionFlags 	&actionFlags = *sliceParams.actionFlags;
	AudioBufferList 			&inputBufferList = *sliceParams.inputBufferList;
	AudioBufferList 			&outputBufferList = *sliceParams.outputBufferList;

	UInt32 channelSize = inSliceFramesToProcess * mBytesPerFrame;
		// fix the size of the buffer we're operating on before we render this slice of time
	for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
		inputBufferList.mBuffers[i].mDataByteSize = inputBufferList.mBuffers[i].mNumberChannels * channelSize;
	}

	for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
		outputBufferList.mBuffers[i].mDataByteSize = outputBufferList.mBuffers[i].mNumberChannels * channelSize;
	}
		// process the buffer
	OSStatus result = ProcessBufferLists(actionFlags, inputBufferList, outputBufferList, inSliceFramesToProcess );

		// we just partially processed the buffers, so increment the data pointers to the next part of the buffer to process
	for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
		inputBufferList.mBuffers[i].mData =
			(char *)inputBufferList.mBuffers[i].mData + inputBufferList.mBuffers[i].mNumberChannels * channelSize;
	}

	for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
		outputBufferList.mBuffers[i].mData =
			(char *)outputBufferList.mBuffers[i].mData + outputBufferList.mBuffers[i].mNumberChannels * channelSize;
	}

	return result;
}
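
//_____________________________________________________________________________
//
//	Illustrative sketch (not part of the original SDK source): the slices handled
//	above exist because a host has scheduled parameter events for the coming
//	render cycle with AudioUnitScheduleParameters().  For example, an immediate
//	change 64 frames into the buffer ("unit" and the parameter ID are placeholders):
//
#if 0
enum { kMyParam_Gain = 0 };			// hypothetical parameter ID

static OSStatus ScheduleImmediateGainChange (AudioUnit unit)
{
	AudioUnitParameterEvent event = {};
	event.scope		= kAudioUnitScope_Global;
	event.element	= 0;
	event.parameter	= kMyParam_Gain;
	event.eventType	= kParameterEvent_Immediate;
	event.eventValues.immediate.bufferOffset	= 64;	// frames into the next render cycle
	event.eventValues.immediate.value			= 0.5;
	return AudioUnitScheduleParameters (unit, &event, 1);
}
#endif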

// ____________________________________________________________________________
//

OSStatus 	AUEffectBase::Render(	AudioUnitRenderActionFlags &ioActionFlags,
											const AudioTimeStamp &		inTimeStamp,
											UInt32						nFrames)
{
	if (!HasInput(0))
		return kAudioUnitErr_NoConnection;

	OSStatus result = noErr;

	result = mMainInput->PullInput(ioActionFlags, inTimeStamp, 0 /* element */, nFrames);

	if (result == noErr)
	{
		if(ProcessesInPlace() && mMainOutput->WillAllocateBuffer())
		{
			mMainOutput->SetBufferList(mMainInput->GetBufferList() );
		}

		if (ShouldBypassEffect())
		{
			// leave silence bit alone

			if(!ProcessesInPlace() )
			{
				mMainInput->CopyBufferContentsTo (mMainOutput->GetBufferList());
			}
		}
		else
		{
			if(mParamList.size() == 0 )
			{
				// this will read/write silence bit
				result = ProcessBufferLists(ioActionFlags, mMainInput->GetBufferList(), mMainOutput->GetBufferList(), nFrames);
			}
			else
			{
				// deal with scheduled parameters...

				AudioBufferList &inputBufferList = mMainInput->GetBufferList();
				AudioBufferList &outputBufferList = mMainOutput->GetBufferList();

				ScheduledProcessParams processParams;
				processParams.actionFlags = &ioActionFlags;
				processParams.inputBufferList = &inputBufferList;
				processParams.outputBufferList = &outputBufferList;

				// divide up the buffer into slices according to scheduled params then
				// do the DSP for each slice (ProcessScheduledSlice() called for each slice)
				result = ProcessForScheduledParams(	mParamList,
													nFrames,
													&processParams );


				// restore the buffer pointers and sizes to what they were before we started
				UInt32 channelSize = nFrames * mBytesPerFrame;
				for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
					UInt32 size = inputBufferList.mBuffers[i].mNumberChannels * channelSize;
					inputBufferList.mBuffers[i].mData = (char *)inputBufferList.mBuffers[i].mData - size;
					inputBufferList.mBuffers[i].mDataByteSize = size;
				}

				for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
					UInt32 size = outputBufferList.mBuffers[i].mNumberChannels * channelSize;
					outputBufferList.mBuffers[i].mData = (char *)outputBufferList.mBuffers[i].mData - size;
					outputBufferList.mBuffers[i].mDataByteSize = size;
				}
			}
		}

		if ( (ioActionFlags & kAudioUnitRenderAction_OutputIsSilence) && !ProcessesInPlace() )
		{
			AUBufferList::ZeroBuffer(mMainOutput->GetBufferList() );
		}
	}

	return result;
}


OSStatus	AUEffectBase::ProcessBufferLists(
									AudioUnitRenderActionFlags &	ioActionFlags,
									const AudioBufferList &			inBuffer,
									AudioBufferList &				outBuffer,
									UInt32							inFramesToProcess )
{
	if (ShouldBypassEffect())
		return noErr;

	// interleaved (or mono)
	switch (mCommonPCMFormat) {
		case CAStreamBasicDescription::kPCMFormatFloat32 :
			ProcessBufferListsT<Float32>(ioActionFlags, inBuffer, outBuffer, inFramesToProcess);
			break;
		case CAStreamBasicDescription::kPCMFormatFixed824 :
			ProcessBufferListsT<SInt32>(ioActionFlags, inBuffer, outBuffer, inFramesToProcess);
			break;
		case CAStreamBasicDescription::kPCMFormatInt16 :
			ProcessBufferListsT<SInt16>(ioActionFlags, inBuffer, outBuffer, inFramesToProcess);
			break;
		default :
			throw CAException(kAudio_UnimplementedError);
	}

	return noErr;
}
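
//_____________________________________________________________________________
//
//	Illustrative sketch (not part of the original SDK source): for each channel,
//	ProcessBufferListsT<Float32> above ends up in the per-channel kernel's
//	Process() override.  A hypothetical gain kernel ("MyKernel") might look like
//	this, assuming the Float32 Process() overload declared in AUEffectBase.h:
//
#if 0
void	MyKernel::Process (	const Float32	*inSourceP,
							Float32			*inDestP,
							UInt32			inFramesToProcess,
							UInt32			inNumChannels,	// one kernel per channel, so expected to be 1
							bool			&ioSilence)
{
	const Float32 gain = 0.5;			// hypothetical fixed gain
	for (UInt32 i = 0; i < inFramesToProcess; ++i)
		inDestP[i] = inSourceP[i] * gain;
	// a pure gain never turns silence into signal, so ioSilence is left as-is
}
#endif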

Float64		AUEffectBase::GetSampleRate()
{
	return GetOutput(0)->GetStreamFormat().mSampleRate;
}

UInt32		AUEffectBase::GetNumberOfChannels()
{
	return GetOutput(0)->GetStreamFormat().mChannelsPerFrame;
}