coreaudio.c revision c58f1213e628a545081c70e26c6b67a841cff880
/* $Id$ */
/** @file
* VBox audio devices: Mac OS X CoreAudio audio driver
*/
/*
* Copyright (C) 2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#define LOG_GROUP LOG_GROUP_DEV_AUDIO
#define AUDIO_CAP "coreaudio"
#include "vl_vbox.h"
#include "audio.h"
#include "audio_int.h"
#include <CoreAudio/CoreAudio.h>
#include <CoreServices/CoreServices.h>
#include <AudioUnit/AudioUnit.h>
#include <AudioToolbox/AudioConverter.h>
/* todo:
*/
/* Most of this is based on:
*/
/*#define CA_EXTENSIVE_LOGGING*/
/*******************************************************************************
*
* IO Ring Buffer section
*
******************************************************************************/
/* Implementation of a lock free ring buffer which could be used in a multi
* threaded environment. Note that only the acquire, release and getter
* functions are threading aware. So don't use reset if the ring buffer is
* still in use. */
/* State of one lock-free ring buffer instance. Only cBufferUsed is shared
 * between the producer and consumer threads (declared volatile); all other
 * members are touched by one side only or at setup/teardown time.
 * NOTE(review): the read cursor, write cursor and total-size members appear
 * to have been lost from this revision's text — only their describing
 * comments remain. Confirm against the original file before relying on the
 * layout. */
typedef struct IORINGBUFFER
{
/* The current read position in the buffer */
/* The current write position in the buffer */
/* How much space of the buffer is currently in use */
volatile uint32_t cBufferUsed;
/* How big is the buffer */
/* The buffer itself (heap-allocated backing storage) */
char *pBuffer;
} IORINGBUFFER;
/* Pointer to a ring buffer structure */
typedef IORINGBUFFER* PIORINGBUFFER;
{
if (pTmpBuffer)
{
if(pTmpBuffer->pBuffer)
{
*ppBuffer = pTmpBuffer;
}
else
}
}
{
if (pBuffer)
{
}
}
{
pBuffer->cBufferUsed = 0;
}
{
}
{
}
{
}
/**
 * Acquire a contiguous readable block from the ring buffer.
 *
 * On return *ppStart points at the current read position and *pcSize holds
 * the number of bytes that may be read from there; both are zeroed when no
 * data is available. The caller must hand the block back with the matching
 * release function so the space can be reused by the writer.
 *
 * NOTE(review): the declarations/assignments of the used-size and block-size
 * locals (uUsed, uSize) are missing from this revision's text — the body
 * below is incomplete residue, kept byte-identical.
 */
static void IORingBufferAquireReadBlock(PIORINGBUFFER pBuffer, uint32_t cReqSize, char **ppStart, uint32_t *pcSize)
{
/* Default to "nothing available". */
*ppStart = 0;
*pcSize = 0;
/* How much is in use? */
if (uUsed > 0)
{
/* Get the size out of the requested size, the read block till the end
 * of the buffer & the currently used size. */
if (uSize > 0)
{
/* Return the pointer address which points to the current read
 * position. */
}
}
}
{
/* Split at the end of the buffer. */
}
/**
 * Acquire a contiguous writable block from the ring buffer.
 *
 * Mirror image of IORingBufferAquireReadBlock: on return *ppStart points at
 * the current write position and *pcSize holds the number of bytes that may
 * be written there; both are zeroed when the buffer is full. The caller must
 * publish the written bytes with the matching release function.
 *
 * NOTE(review): the declarations/assignments of the free-size and block-size
 * locals (uFree, uSize) are missing from this revision's text — the body
 * below is incomplete residue, kept byte-identical.
 */
static void IORingBufferAquireWriteBlock(PIORINGBUFFER pBuffer, uint32_t cReqSize, char **ppStart, uint32_t *pcSize)
{
/* Default to "no space available". */
*ppStart = 0;
*pcSize = 0;
/* How much is free? */
if (uFree > 0)
{
/* Get the size out of the requested size, the write block till the end
 * of the buffer & the currently free size. */
if (uSize > 0)
{
/* Return the pointer address which points to the current write
 * position. */
}
}
}
{
/* Split at the end of the buffer. */
}
/*******************************************************************************
*
* Helper function section
*
******************************************************************************/
#if DEBUG
/**
 * Debug helper: dump an AudioStreamBasicDescription to the log.
 *
 * @param pszDesc      Caller-supplied prefix describing which stream the
 *                     description belongs to.
 * @param pStreamDesc  The CoreAudio stream format to dump.
 *
 * The format ID is logged both numerically and as its four-character code
 * (via the RT_BYTEx accessors). NOTE(review): the per-flag conditionals that
 * originally guarded each flag-name Log() call are missing from this
 * revision's text, as is the code filling pszSampleRate — the body below is
 * incomplete residue, kept byte-identical.
 */
static void caDebugOutputAudioStreamBasicDescription(const char *pszDesc, const AudioStreamBasicDescription *pStreamDesc)
{
char pszSampleRate[32];
Log(("CoreAudio: Format ID: %RU32 (%c%c%c%c)\n", pStreamDesc->mFormatID, RT_BYTE4(pStreamDesc->mFormatID), RT_BYTE3(pStreamDesc->mFormatID), RT_BYTE2(pStreamDesc->mFormatID), RT_BYTE1(pStreamDesc->mFormatID)));
Log((" Float"));
Log((" BigEndian"));
Log((" SignedInteger"));
Log((" Packed"));
Log((" AlignedHigh"));
Log((" NonInterleaved"));
Log((" NonMixable"));
Log((" AllClear"));
Log(("\n"));
}
#endif /* DEBUG */
/**
 * Translate a VBox audio_pcm_info (frequency, channels, sample format) into
 * a CoreAudio AudioStreamBasicDescription.
 *
 * @param pInfo        Source PCM settings from the VBox audio layer.
 * @param pStreamDesc  Destination CoreAudio stream description to fill.
 *
 * NOTE(review): the body of this function is missing from this revision's
 * text; kept byte-identical.
 */
static void caPCMInfoToAudioStreamBasicDescription(struct audio_pcm_info *pInfo, AudioStreamBasicDescription *pStreamDesc)
{
}
/**
 * Try to set a device's frame buffer size, clamping the request to the
 * device's supported buffer-size range if the plain set does not stick.
 *
 * @param device    The CoreAudio device to configure.
 * @param fInput    Whether the input (true) or output (false) side is meant.
 * @param cReqSize  Requested frame buffer size.
 * @param pcActSize Where to store the size actually in effect afterwards.
 * @return CoreAudio OSStatus; noErr on success.
 *
 * NOTE(review): the AudioDeviceSetProperty/AudioDeviceGetProperty calls and
 * most local declarations (err, cSize, pRange, cMin, cMax) are missing from
 * this revision's text — only argument tails and control-flow fragments
 * remain. Body kept byte-identical.
 */
static OSStatus caSetFrameBufferSize(AudioDeviceID device, bool fInput, UInt32 cReqSize, UInt32 *pcActSize)
{
size_t a = 0;
/* First try to set the new frame buffer size. */
NULL,
0,
sizeof(cReqSize),
&cReqSize);
/* Check if it really was set. */
0,
&cSize,
return err;
/* If both sizes are the same, we are done. */
return noErr;
/* If not we have to check the limits of the device. First get the size of
   the buffer size range property. */
0,
&cSize,
NULL);
return err;
if (RT_VALID_PTR(pRange))
{
0,
&cSize,
pRange);
{
/* Walk every supported range the device reported. */
for (a=0; a < cSize/sizeof(AudioValueRange); ++a)
{
/* Search for the absolute minimum. */
|| cMin == -1)
/* Search for the best maximum which isn't bigger than
   cReqSize. */
{
}
}
/* No fitting maximum found: fall back to the device minimum. */
if (cMax == -1)
/* First try to set the new frame buffer size. */
NULL,
0,
sizeof(cReqSize),
&cReqSize);
/* Check if it really was set. */
0,
&cSize,
}
}
else
return notEnoughMemoryErr;
return err;
}
{
0,
0,
&uSize,
&uFlag);
if (err != kAudioHardwareNoError)
return uFlag >= 1;
}
{
#if 0
/**
* CFStringGetCStringPtr doesn't reliably return requested string instead return depends on "many factors" (not clear which)
* ( please follow the link
* http://developer.apple.com/library/mac/#documentation/CoreFoundation/Reference/CFStringRef/Reference/reference.html
* for more details). Branch below allocates memory using mechanisms which hasn't got single method for memory free:
* RTStrDup - RTStrFree
* RTMemAllocZTag - RTMemFree
* which aren't compatible, opposite to CFStringGetCStringPtr CFStringGetCString has well defined
* behaviour and confident return value.
*/
/* First try to get the pointer directly. */
if (pszTmp)
{
/* On success make a copy */
}
else
{
/* If the pointer isn't available directly, we have to make a copy. */
{
}
}
#else
/* If the pointer isn't available directly, we have to make a copy. */
{
}
#endif
return pszResult;
}
{
/* Create a CFString out of our CString */
/* Fill the translation structure */
uSize = sizeof(AudioValueTranslation);
/* Fetch the translation from the UID to the audio Id */
&uSize,
&translation);
/* Release the temporary CFString */
return audioId;
/* Return the unknown device on error */
return kAudioDeviceUnknown;
}
/*******************************************************************************
*
* Global structures section
*
******************************************************************************/
/* Initialization status indicator used for the recreation of the AudioUnits. */
/* Error code which indicates "End of data" */
/* Driver configuration filled in from the coreaudio_options table and handed
 * back to the generic audio layer by coreaudio_audio_init(). The UID strings
 * let the user pin a specific output/input device instead of the system
 * default. NOTE(review): the member initializers are missing from this
 * revision's text; kept byte-identical. */
struct
{
const char *pszOutputDeviceUID;
const char *pszInputDeviceUID;
} conf =
{
};
/* Per-stream state for one CoreAudio playback voice.
 * NOTE(review): the member declarations appear to have been lost from this
 * revision's text — only their describing comments remain. */
typedef struct caVoiceOut
{
/* HW voice output structure defined by VBox */
/* Stream description which is default on the device */
/* Stream description which is selected for using by VBox */
/* The audio device ID of the currently used device */
/* The AudioUnit used */
/* A ring buffer for transferring data to the playback thread */
/* Initialization status tracker. Used when some of the device parameters
 * or the device itself is changed during the runtime. */
} caVoiceOut;
/* Per-stream state for one CoreAudio recording voice. Compared to playback
 * it additionally carries converter state, since the device sample rate or
 * channel count may differ from what VBox requests.
 * NOTE(review): the member declarations appear to have been lost from this
 * revision's text — only their describing comments remain. */
typedef struct caVoiceIn
{
/* HW voice input structure defined by VBox */
/* Stream description which is default on the device */
/* Stream description which is selected for using by VBox */
/* The audio device ID of the currently used device */
/* The AudioUnit used */
/* The audio converter if necessary */
/* A temporary position value used in the caConverterCallback function */
/* The ratio between the device & the stream sample rate */
/* An extra buffer used for render the audio data in the recording thread */
/* A ring buffer for transferring data from the recording thread */
/* Initialization status tracker. Used when some of the device parameters
 * or the device itself is changed during the runtime. */
} caVoiceIn;
#ifdef CA_EXTENSIVE_LOGGING
# define CA_EXT_DEBUG_LOG(a) Log2(a)
#else
# define CA_EXT_DEBUG_LOG(a) do {} while(0)
#endif
/*******************************************************************************
*
* CoreAudio output section
*
******************************************************************************/
/* We need some forward declarations */
/* Callback for getting notified when the default output device was changed */
void *inClientData)
{
bool fRun = false;
switch (inPropertyID)
{
{
/* This listener is called on every change of the hardware
* device. So check if the default device has really changed. */
&uSize,
&ad);
{
Log2(("CoreAudio: [Output] Default output device changed!\n"));
/* We move the reinitialization to the next output event.
* This make sure this thread isn't blocked and the
* reinitialization is done when necessary only. */
}
break;
}
}
return noErr;
}
/* Callback for getting notified when some of the properties of an audio device has changed */
void *inClientData)
{
switch (inPropertyID)
{
#ifdef DEBUG
{
Log2(("CoreAudio: [Output] Processor overload detected!\n"));
break;
}
#endif /* DEBUG */
default: break;
}
return noErr;
}
/* Callback to feed audio output buffer */
const AudioTimeStamp* inTimeStamp,
{
return noErr;
/* How much space is used in the ring buffer? */
/* How much space is available in the core audio buffer. Use the smaller
* size of the too. */
CA_EXT_DEBUG_LOG(("CoreAudio: [Output] Start reading buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
/* Iterate as long as data is available */
{
/* How much is left? */
CA_EXT_DEBUG_LOG(("CoreAudio: [Output] Try reading %RU32 samples (%RU32 bytes)\n", csToRead, cbToRead));
/* Try to acquire the necessary block from the ring buffer. */
/* How much to we get? */
CA_EXT_DEBUG_LOG(("CoreAudio: [Output] There are %RU32 samples (%RU32 bytes) available\n", csToRead, cbToRead));
/* Break if nothing is used anymore. */
if (RT_UNLIKELY(cbToRead == 0))
break;
/* Copy the data from our ring buffer to the core audio buffer. */
/* Release the read buffer, so it could be used for new data. */
/* How much have we reads so far. */
}
/* Write the bytes to the core audio buffer which where really written. */
CA_EXT_DEBUG_LOG(("CoreAudio: [Output] Finished reading buffer with %RU32 samples (%RU32 bytes)\n", csReads, csReads << caVoice->hw.info.shift));
return noErr;
}
{
{
/* Fetch the default audio output device currently in use */
&uSize,
&caVoice->audioDeviceId);
{
return -1;
}
}
/* Try to get the name of the output device and log it. It's not fatal if
* it fails. */
uSize = sizeof(CFStringRef);
0,
0,
&uSize,
&name);
{
0,
0,
&uSize,
&name);
{
}
}
else
/* Get the default frames buffer size, so that we can setup our internal
* buffers. */
0,
false,
&uSize,
&cFrames);
{
return -1;
}
the device. */
false,
&cFrames);
{
return -1;
}
cd.componentFlags = 0;
cd.componentFlagsMask = 0;
/* Try to find the default HAL output component. */
if (RT_UNLIKELY(cp == 0))
{
LogRel(("CoreAudio: [Output] Failed to find HAL output component\n"));
return -1;
}
/* Open the default HAL output component. */
{
return -1;
}
/* Switch the I/O mode for output to on. */
uFlag = 1;
0,
&uFlag,
sizeof(uFlag));
{
return -1;
}
/* Set the default audio output device as the device for the new AudioUnit. */
0,
sizeof(caVoice->audioDeviceId));
{
return -1;
}
/* CoreAudio will inform us on a second thread when it needs more data for
* output. Therefor register an callback function which will provide the new
* data. */
0,
&cb,
sizeof(cb));
{
return -1;
}
/* Set the quality of the output render to the maximum. */
/* uFlag = kRenderQuality_High;*/
/* err = AudioUnitSetProperty(caVoice->audioUnit,*/
/* kAudioUnitProperty_RenderQuality,*/
/* kAudioUnitScope_Global,*/
/* 0,*/
/* &uFlag,*/
/* sizeof(uFlag));*/
/* Not fatal */
/* if (RT_UNLIKELY(err != noErr))*/
/* LogRel(("CoreAudio: [Output] Failed to set the render quality to the maximum (%RI32)\n", err));*/
/* Fetch the current stream format of the device. */
0,
&uSize);
{
return -1;
}
/* Create an AudioStreamBasicDescription based on the audio settings of
* VirtualBox. */
#if DEBUG
#endif /* DEBUG */
/* Set the device format description for the stream. */
0,
sizeof(caVoice->streamFormat));
{
return -1;
}
0,
&uSize);
{
return -1;
}
/* Also set the frame buffer size off the device on our AudioUnit. This
should make sure that the frames count which we receive in the render
thread is as we like. */
0,
&cFrames,
sizeof(cFrames));
{
LogRel(("CoreAudio: [Output] Failed to set maximum frame buffer size on the AudioUnit (%RI32)\n", err));
return -1;
}
/* Finally initialize the new AudioUnit. */
{
return -1;
}
/* There are buggy devices (e.g. my Bluetooth headset) which doesn't honor
* the frame buffer size set in the previous calls. So finally get the
* frame buffer size after the AudioUnit was initialized. */
0,
&cFrames,
&uSize);
{
LogRel(("CoreAudio: [Output] Failed to get maximum frame buffer size from the AudioUnit (%RI32)\n", err));
return -1;
}
/* Create the internal ring buffer. */
{
LogRel(("CoreAudio: [Output] Failed to create internal ring buffer\n"));
return -1;
}
LogRel(("CoreAudio: [Output] Warning! After recreation, the CoreAudio ring buffer doesn't has the same size as the device buffer (%RU32 vs. %RU32).\n", cSamples, (uint32_t)hw->samples));
#ifdef DEBUG
0,
false,
caVoice);
/* Not Fatal */
#endif /* DEBUG */
return 0;
}
{
}
{
/* Check if the audio device should be reinitialized. If so do it. */
/* We return the live count in the case we are not initialized. This should
* prevent any under runs. */
return audio_pcm_hw_get_live_out(hw);
/* Make sure the device is running */
/* How much space is available in the ring buffer */
/* How much data is available. Use the smaller size of the too. */
CA_EXT_DEBUG_LOG(("CoreAudio: [Output] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << hw->info.shift));
/* Iterate as long as data is available */
{
/* How much is left? Split request at the end of our samples buffer. */
CA_EXT_DEBUG_LOG(("CoreAudio: [Output] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
/* Try to acquire the necessary space from the ring buffer. */
/* How much to we get? */
CA_EXT_DEBUG_LOG(("CoreAudio: [Output] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
/* Break if nothing is free anymore. */
if (RT_UNLIKELY(cbToWrite == 0))
break;
/* Copy the data from our mix buffer to the ring buffer. */
/* Release the ring buffer, so the read thread could start reading this data. */
/* How much have we written so far. */
}
CA_EXT_DEBUG_LOG(("CoreAudio: [Output] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << hw->info.shift));
/* Return the count of samples we have processed. */
return csWritten;
}
{
}
{
if (!( status == CA_STATUS_INIT
|| status == CA_STATUS_REINIT))
return 0;
switch (cmd)
{
case VOICE_ENABLE:
{
/* Only start the device if it is actually stopped */
{
0);
{
return -1;
}
}
break;
}
case VOICE_DISABLE:
{
/* Only stop the device if it is actually running */
{
{
return -1;
}
0);
{
return -1;
}
}
break;
}
}
return 0;
}
{
int rc = 0;
if (!( status == CA_STATUS_INIT
|| status == CA_STATUS_REINIT))
return;
{
#ifdef DEBUG
0,
false,
/* Not Fatal */
#endif /* DEBUG */
{
{
}
else
}
else
}
else
}
{
int rc = 0;
bool fDeviceByUser = false; /* use we a device which was set by the user? */
/* Initialize the hardware info section with the audio settings */
/* Try to find the audio device set by the user. Use
* export VBOX_COREAUDIO_OUTPUT_DEVICE_UID=AppleHDAEngineOutput:0
* to set it. */
if (conf.pszOutputDeviceUID)
{
/* Not fatal */
LogRel(("CoreAudio: [Output] Unable to find output device %s. Falling back to the default audio device. \n", conf.pszOutputDeviceUID));
else
fDeviceByUser = true;
}
if (RT_UNLIKELY(rc != 0))
return rc;
/* The samples have to correspond to the internal ring buffer size. */
hw->samples = (IORingBufferSize(caVoice->pBuf) >> hw->info.shift) / caVoice->streamFormat.mChannelsPerFrame;
/* When the devices isn't forced by the user, we want default device change
* notifications. */
if (!fDeviceByUser)
{
caVoice);
/* Not Fatal */
}
return 0;
}
/*******************************************************************************
*
* CoreAudio input section
*
******************************************************************************/
/* We need some forward declarations */
/* Callback for getting notified when the default input device was changed */
void *inClientData)
{
bool fRun = false;
switch (inPropertyID)
{
{
/* This listener is called on every change of the hardware
* device. So check if the default device has really changed. */
&uSize,
&ad);
{
Log2(("CoreAudio: [Input] Default input device changed!\n"));
/* We move the reinitialization to the next input event.
* This make sure this thread isn't blocked and the
* reinitialization is done when necessary only. */
}
break;
}
}
return noErr;
}
/* Callback for getting notified when some of the properties of an audio device has changed */
void *inClientData)
{
switch (inPropertyID)
{
#ifdef DEBUG
{
Log2(("CoreAudio: [Input] Processor overload detected!\n"));
break;
}
#endif /* DEBUG */
{
Log2(("CoreAudio: [Input] Sample rate changed!\n"));
/* We move the reinitialization to the next input event.
* This make sure this thread isn't blocked and the
* reinitialization is done when necessary only. */
break;
}
default: break;
}
return noErr;
}
/* Callback to convert audio input data from one format to another */
void *inUserData)
{
/* In principle we had to check here if the source is non interleaved & if
* so go through all buffers not only the first one like now. */
return noErr;
/* Log2(("converting .... ################ %RU32 %RU32 %RU32 %RU32 %RU32\n", *ioNumberDataPackets, bufferList->mBuffers[i].mNumberChannels, bufferList->mNumberBuffers, bufferList->mBuffers[i].mDataByteSize, ioData->mNumberBuffers));*/
/* Use the lower one of the packets to process & the available packets in
* the buffer */
/* Set the new size on output, so the caller know what we have processed. */
/* If no data is available anymore we return with an error code. This error
* code will be returned from AudioConverterFillComplexBuffer. */
if (*ioNumberDataPackets == 0)
{
return caConverterEOFDErr;
}
else
{
/* Log2(("converting .... ################ %RU32 %RU32\n", size, caVoice->rpos));*/
}
return noErr;
}
/* Callback to feed audio input buffer */
const AudioTimeStamp* inTimeStamp,
{
return noErr;
/* If nothing is pending return immediately. */
if (inNumberFrames == 0)
return noErr;
/* Are we using an converter? */
{
/* Firstly render the data as usual */
caVoice->bufferList.mBuffers[0].mDataByteSize = caVoice->deviceFormat.mBytesPerFrame * inNumberFrames;
&caVoice->bufferList);
{
return err;
}
/* How much space is free in the ring buffer? */
/* How much space is used in the core audio buffer. Use the smaller size of
* the too. */
csAvail = RT_MIN(csAvail, (uint32_t)((caVoice->bufferList.mBuffers[0].mDataByteSize / caVoice->deviceFormat.mBytesPerFrame) * caVoice->sampleRatio));
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
/* Initialize the temporary output buffer */
/* Set the read position to zero. */
/* Iterate as long as data is available */
{
/* How much is left? */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
/* Try to acquire the necessary space from the ring buffer. */
/* How much to we get? */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
/* Break if nothing is free anymore. */
if (RT_UNLIKELY(cbToWrite == 0))
break;
/* Now set how much space is available for output */
/* Set our ring buffer as target. */
&tmpList,
NULL);
&& err != caConverterEOFDErr)
{
Log(("CoreAudio: [Input] Failed to convert audio data (%RI32:%c%c%c%c)\n", err, RT_BYTE4(err), RT_BYTE3(err), RT_BYTE2(err), RT_BYTE1(err)));
break;
}
/* Check in any case what processed size is returned. It could be
* much littler than we expected. */
/* Release the ring buffer, so the main thread could start reading this data. */
/* If the error is "End of Data" it means there is no data anymore
* which could be converted. So end here now. */
if (err == caConverterEOFDErr)
break;
}
/* Cleanup */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << caVoice->hw.info.shift));
}
else
{
caVoice->bufferList.mBuffers[0].mDataByteSize = caVoice->streamFormat.mBytesPerFrame * inNumberFrames;
&caVoice->bufferList);
{
return err;
}
/* How much space is free in the ring buffer? */
/* How much space is used in the core audio buffer. Use the smaller size of
* the too. */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
/* Iterate as long as data is available */
{
/* How much is left? */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Try writing %RU32 samples (%RU32 bytes)\n", csToWrite, cbToWrite));
/* Try to acquire the necessary space from the ring buffer. */
/* How much to we get? */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
/* Break if nothing is free anymore. */
if (RT_UNLIKELY(cbToWrite == 0))
break;
/* Copy the data from the core audio buffer to the ring buffer. */
memcpy(pcDst, (char*)caVoice->bufferList.mBuffers[0].mData + (csWritten << caVoice->hw.info.shift), cbToWrite);
/* Release the ring buffer, so the main thread could start reading this data. */
}
/* Cleanup */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << caVoice->hw.info.shift));
}
return err;
}
{
int rc = -1;
{
/* Fetch the default audio output device currently in use */
&uSize,
&caVoice->audioDeviceId);
{
return -1;
}
}
/* Try to get the name of the input device and log it. It's not fatal if
* it fails. */
uSize = sizeof(CFStringRef);
0,
0,
&uSize,
&name);
{
0,
0,
&uSize,
&name);
{
if (pszUID)
}
if (pszName)
}
else
/* Get the default frames buffer size, so that we can setup our internal
* buffers. */
0,
true,
&uSize,
&cFrames);
{
return -1;
}
the device. */
true,
&cFrames);
{
return -1;
}
cd.componentFlags = 0;
cd.componentFlagsMask = 0;
/* Try to find the default HAL output component. */
if (RT_UNLIKELY(cp == 0))
{
LogRel(("CoreAudio: [Input] Failed to find HAL output component\n"));
return -1;
}
/* Open the default HAL output component. */
{
return -1;
}
/* Switch the I/O mode for input to on. */
uFlag = 1;
1,
&uFlag,
sizeof(uFlag));
{
return -1;
}
/* Switch the I/O mode for output to off. This is important, as this is a
* pure input stream. */
uFlag = 0;
0,
&uFlag,
sizeof(uFlag));
{
return -1;
}
/* Set the default audio input device as the device for the new AudioUnit. */
0,
sizeof(caVoice->audioDeviceId));
{
return -1;
}
/* CoreAudio will inform us on a second thread for new incoming audio data.
* Therefor register an callback function, which will process the new data.
* */
0,
&cb,
sizeof(cb));
{
return -1;
}
/* Fetch the current stream format of the device. */
1,
&uSize);
{
return -1;
}
/* Create an AudioStreamBasicDescription based on the audio settings of
* VirtualBox. */
#if DEBUG
#endif /* DEBUG */
/* If the frequency of the device is different from the requested one we
* need a converter. The same count if the number of channels is different. */
{
{
return -1;
}
{
/* If the channel count is different we have to tell this the converter
and supply a channel mapping. For now we only support mapping
from mono to stereo. For all other cases the core audio defaults
are used, which means dropping additional channels in most
cases. */
sizeof(channelMap),
{
LogRel(("CoreAudio: [Input] Failed to add a channel mapper to the audio converter (%RI32)\n", err));
return -1;
}
}
/* Set sample rate converter quality to maximum */
/* uFlag = kAudioConverterQuality_Max;*/
/* err = AudioConverterSetProperty(caVoice->converter,*/
/* kAudioConverterSampleRateConverterQuality,*/
/* sizeof(uFlag),*/
/* &uFlag);*/
/* Not fatal */
/* if (RT_UNLIKELY(err != noErr))*/
/* LogRel(("CoreAudio: [Input] Failed to set the audio converter quality to the maximum (%RI32)\n", err));*/
Log(("CoreAudio: [Input] Converter in use\n"));
/* Set the new format description for the stream. */
1,
sizeof(caVoice->deviceFormat));
{
return -1;
}
1,
sizeof(caVoice->deviceFormat));
{
return -1;
}
}
else
{
/* Set the new format description for the stream. */
1,
sizeof(caVoice->streamFormat));
{
return -1;
}
}
/* Also set the frame buffer size off the device on our AudioUnit. This
should make sure that the frames count which we receive in the render
thread is as we like. */
1,
&cFrames,
sizeof(cFrames));
{
LogRel(("CoreAudio: [Input] Failed to set maximum frame buffer size on the AudioUnit (%RI32)\n", err));
return -1;
}
/* Finally initialize the new AudioUnit. */
{
return -1;
}
1,
&uSize);
{
return -1;
}
/* There are buggy devices (e.g. my Bluetooth headset) which doesn't honor
* the frame buffer size set in the previous calls. So finally get the
* frame buffer size after the AudioUnit was initialized. */
0,
&cFrames,
&uSize);
{
LogRel(("CoreAudio: [Input] Failed to get maximum frame buffer size from the AudioUnit (%RI32)\n", err));
return -1;
}
/* Calculate the ratio between the device and the stream sample rate. */
/* Set to zero first */
/* Create the AudioBufferList structure with one buffer. */
/* Initialize the buffer to nothing. */
/* Make sure that the ring buffer is big enough to hold the recording
* data. Compare the maximum frames per slice value with the frames
* necessary when using the converter where the sample rate could differ.
* The result is always multiplied by the channels per frame to get the
* samples count. */
(cFrames * caVoice->deviceFormat.mBytesPerFrame * caVoice->sampleRatio) / caVoice->streamFormat.mBytesPerFrame)
LogRel(("CoreAudio: [Input] Warning! After recreation, the CoreAudio ring buffer doesn't has the same size as the device buffer (%RU32 vs. %RU32).\n", cSamples, (uint32_t)hw->samples));
/* Create the internal ring buffer. */
rc = 0;
else
LogRel(("CoreAudio: [Input] Failed to create internal ring buffer\n"));
if (rc != 0)
{
return -1;
}
#ifdef DEBUG
0,
true,
caVoice);
/* Not Fatal */
#endif /* DEBUG */
0,
true,
caVoice);
/* Not Fatal */
return 0;
}
{
}
{
char *pcSrc;
/* Check if the audio device should be reinitialized. If so do it. */
return 0;
/* How much space is used in the ring buffer? */
/* How much space is available in the mix buffer. Use the smaller size of
* the too. */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Start reading buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
/* Iterate as long as data is available */
{
/* How much is left? Split request at the end of our samples buffer. */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Try reading %RU32 samples (%RU32 bytes)\n", csToRead, cbToRead));
/* Try to acquire the necessary block from the ring buffer. */
/* How much to we get? */
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] There are %RU32 samples (%RU32 bytes) available\n", csToRead, cbToRead));
/* Break if nothing is used anymore. */
if (cbToRead == 0)
break;
/* Copy the data from our ring buffer to the mix buffer. */
/* Release the read buffer, so it could be used for new data. */
/* How much have we reads so far. */
}
CA_EXT_DEBUG_LOG(("CoreAudio: [Input] Finished reading buffer with %RU32 samples (%RU32 bytes)\n", csReads, csReads << caVoice->hw.info.shift));
return csReads;
}
{
}
{
if (!( status == CA_STATUS_INIT
|| status == CA_STATUS_REINIT))
return 0;
switch (cmd)
{
case VOICE_ENABLE:
{
/* Only start the device if it is actually stopped */
{
}
{
return -1;
}
break;
}
case VOICE_DISABLE:
{
/* Only stop the device if it is actually running */
{
{
return -1;
}
0);
{
return -1;
}
}
break;
}
}
return 0;
}
{
int rc = 0;
if (!( status == CA_STATUS_INIT
|| status == CA_STATUS_REINIT))
return;
{
#ifdef DEBUG
0,
true,
/* Not Fatal */
#endif /* DEBUG */
0,
true,
/* Not Fatal */
{
}
{
{
}
else
}
else
}
else
}
{
int rc = -1;
bool fDeviceByUser = false;
/* Initialize the hardware info section with the audio settings */
/* Try to find the audio device set by the user */
if (conf.pszInputDeviceUID)
{
/* Not fatal */
LogRel(("CoreAudio: [Input] Unable to find input device %s. Falling back to the default audio device. \n", conf.pszInputDeviceUID));
else
fDeviceByUser = true;
}
if (RT_UNLIKELY(rc != 0))
return rc;
/* The samples have to correspond to the internal ring buffer size. */
hw->samples = (IORingBufferSize(caVoice->pBuf) >> hw->info.shift) / caVoice->streamFormat.mChannelsPerFrame;
/* When the devices isn't forced by the user, we want default device change
* notifications. */
if (!fDeviceByUser)
{
caVoice);
/* Not Fatal */
}
return 0;
}
/*******************************************************************************
*
* CoreAudio global section
*
******************************************************************************/
/**
 * Driver init entry point called by the generic audio layer.
 *
 * No device setup happens here; per-voice initialization is done lazily.
 * @return Opaque driver context — the module-wide configuration block,
 *         which the audio core later passes back to coreaudio_audio_fini().
 */
static void *coreaudio_audio_init(void)
{
    void *pvDrvConf = &conf;
    return pvDrvConf;
}
/**
 * Driver teardown entry point called by the generic audio layer.
 *
 * Nothing module-wide to release for CoreAudio; per-voice resources are
 * freed in the voice fini callbacks.
 * @param opaque  The context returned by coreaudio_audio_init() (unused).
 */
static void coreaudio_audio_fini(void *opaque)
{
    (void)opaque; /* no global state to tear down */
}
/* User-tunable options exposed through the generic audio option machinery;
 * they populate conf.pszOutputDeviceUID / conf.pszInputDeviceUID.
 * NOTE(review): the leading initializer fields (option name, type, value
 * pointer) of each entry are missing from this revision's text; kept
 * byte-identical. */
static struct audio_option coreaudio_options[] =
{
"UID of the output device to use", NULL, 0},
"UID of the input device to use", NULL, 0},
};
/* PCM operation table wiring this driver's voice init/fini/run/ctl callbacks
 * into the generic audio layer. NOTE(review): the initializer entries are
 * missing from this revision's text; kept byte-identical. */
static struct audio_pcm_ops coreaudio_pcm_ops =
{
};
/* Public driver descriptor registered with the audio subsystem.
 * NOTE(review): most initializer fields (name, options, ops, voice sizes,
 * init/fini hooks) are missing from this revision's text; kept
 * byte-identical. */
struct audio_driver coreaudio_audio_driver =
{
INIT_FIELD(descr =)
"CoreAudio http://developer.apple.com/audio/coreaudio.html",
};