/* coreaudio.c revision 96f1796553fc6784e763caad6545a80ec95d3aac */
/* $Id$ */
/** @file
* VBox audio devices: Mac OS X CoreAudio audio driver
*/
/*
* Copyright (C) 2010 Sun Microsystems, Inc.
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
* Clara, CA 95054 USA or visit http://www.sun.com if you need
* additional information or have any questions.
*/
#define LOG_GROUP LOG_GROUP_DEV_AUDIO
#define AUDIO_CAP "coreaudio"
#include "vl_vbox.h"
#include "audio.h"
#include "audio_int.h"
#include <CoreAudio/CoreAudio.h>
#include <CoreServices/CoreServices.h>
#include <AudioUnit/AudioUnit.h>
#include <AudioToolbox/AudioConverter.h>
/* todo:
* - checking for properties changes of the devices
* - checking for changing of the default device
* - let the user set the device used (use config)
* - try to set frame size (use config)
*/
/* Most of this is based on:
* (reference list missing in this extraction -- originally Apple's CoreAudio
* playback/recording sample code; restore from the original revision)
*/
/*******************************************************************************
*
* IO Ring Buffer section
*
******************************************************************************/
/* Implementation of a lock free ring buffer which could be used in a multi
* threaded environment. Note that only the acquire, release and getter
* functions are threading aware. So don't use reset if the ring buffer is
* still in use. */
typedef struct IORINGBUFFER
{
/* The current read position in the buffer */
uint32_t uReadPos;
/* The current write position in the buffer */
uint32_t uWritePos;
/* How much space of the buffer is currently in use */
volatile uint32_t cBufferUsed;
/* How big is the buffer */
uint32_t cBufSize;
/* The buffer itself */
char *pBuffer;
} IORINGBUFFER;
/* Pointer to an ring buffer structure */
typedef IORINGBUFFER* PIORINGBUFFER;
/* NOTE(review): the next stretch contains the remains of several ring buffer
* management functions (create, destroy, reset and the size/used/free
* getters). Their signatures and most of their statements are missing from
* this extraction -- restore from the original revision before use. */
{
if (pTmpBuffer)
{
if(pTmpBuffer->pBuffer)
{
/* Both allocations succeeded: hand the ring buffer to the caller. */
*ppBuffer = pTmpBuffer;
}
else
}
}
/* NOTE(review): destroy fragment -- the actual free calls are missing. */
{
if (pBuffer)
{
}
}
/* NOTE(review): reset fragment -- per the comment above IORINGBUFFER this is
* not thread safe; presumably it also rewinds read/write positions. */
{
pBuffer->cBufferUsed = 0;
}
/* NOTE(review): empty getter fragment (body lost in this extraction). */
{
}
/* Returns how much of the ring buffer is currently occupied. */
{
return pBuffer->cBufferUsed;
}
/* NOTE(review): empty getter fragment (body lost in this extraction). */
{
}
/* Fetches a contiguous block that may be read from the ring buffer; on
* return *ppStart/*pcSize describe the block (0/0 when nothing is readable).
* NOTE(review): the local declarations (uUsed, uSize) and the pointer/offset
* arithmetic are missing from this extraction -- TODO restore. */
static void IORingBufferAquireReadBlock(PIORINGBUFFER pBuffer, uint32_t cReqSize, char **ppStart, uint32_t *pcSize)
{
/* Default to "nothing available". */
*ppStart = 0;
*pcSize = 0;
/* How much is in use? */
if (uUsed > 0)
{
/* Get the size out of the requested size, the read block till the end
* of the buffer & the currently used size. */
if (uSize > 0)
{
/* Return the pointer address which point to the current read
* position. */
}
}
}
/* NOTE(review): release-read fragment -- the position/used-count updates are
* missing; only the wrap-around comment survives. */
{
/* Split at the end of the buffer. */
}
/* Fetches a contiguous block that may be written into the ring buffer; on
* return *ppStart/*pcSize describe the block (0/0 when the buffer is full).
* NOTE(review): the local declarations (uFree, uSize) and the pointer/offset
* arithmetic are missing from this extraction -- TODO restore. */
static void IORingBufferAquireWriteBlock(PIORINGBUFFER pBuffer, uint32_t cReqSize, char **ppStart, uint32_t *pcSize)
{
/* Default to "no space available". */
*ppStart = 0;
*pcSize = 0;
/* How much is free? */
if (uFree > 0)
{
/* Get the size out of the requested size, the write block till the end
* of the buffer & the currently free size. */
if (uSize > 0)
{
/* Return the pointer address which point to the current write
* position. */
}
}
}
/* NOTE(review): release-write fragment -- the position/used-count updates
* are missing; only the wrap-around comment survives. */
{
/* Split at the end of the buffer. */
}
/*******************************************************************************
*
* Helper function section
*
******************************************************************************/
#if DEBUG
/* Dumps an AudioStreamBasicDescription to the log (debug builds only).
* NOTE(review): the pszDesc/sample-rate output and the mFormatFlags tests
* that should guard each of the flag Log calls below are missing from this
* extraction; as written every flag name would be printed unconditionally. */
static void caDebugOutputAudioStreamBasicDescription(const char *pszDesc, const AudioStreamBasicDescription *pStreamDesc)
{
char pszSampleRate[32];
Log(("CoreAudio: Format ID: %RU32 (%c%c%c%c)\n", pStreamDesc->mFormatID, RT_BYTE4(pStreamDesc->mFormatID), RT_BYTE3(pStreamDesc->mFormatID), RT_BYTE2(pStreamDesc->mFormatID), RT_BYTE1(pStreamDesc->mFormatID)));
Log((" Float"));
Log((" BigEndian"));
Log((" SignedInteger"));
Log((" Packed"));
Log((" AlignedHigh"));
Log((" NonInterleaved"));
Log((" NonMixable"));
Log((" AllClear"));
Log(("\n"));
}
#endif /* DEBUG */
/* Translates VBox audio settings (audsettings_t) into a CoreAudio
* AudioStreamBasicDescription in *pStreamDesc.
* NOTE(review): the switch subject (presumably pAS->fmt) and the field
* assignments (mBitsPerChannel, mFormatFlags, ...) inside every case are
* missing from this extraction -- TODO restore. */
static void caAudioSettingsToAudioStreamBasicDescription(const audsettings_t *pAS, AudioStreamBasicDescription *pStreamDesc)
{
{
case AUD_FMT_U8:
{
break;
}
case AUD_FMT_S8:
{
break;
}
case AUD_FMT_U16:
{
break;
}
case AUD_FMT_S16:
{
break;
}
/* NOTE(review): gating 32-bit support on a PulseAudio define looks odd in
* a CoreAudio driver -- confirm against the original revision. */
#ifdef PA_SAMPLE_S32LE
case AUD_FMT_U32:
{
break;
}
case AUD_FMT_S32:
{
break;
}
#endif
default:
break;
}
}
/* Tries to configure the frame buffer size of the given audio device,
* clamping the request to the range the device supports; the size that was
* actually configured is meant to be returned through *pcActSize.
* NOTE(review): the AudioDeviceSetProperty/AudioDeviceGetProperty calls
* survive only as trailing argument lines; the property selectors and the
* local declarations (err, cSize, pRange, cMin, cMax) are missing from this
* extraction -- TODO restore. */
static OSStatus caSetFrameBufferSize(AudioDeviceID device, bool fInput, UInt32 cReqSize, UInt32 *pcActSize)
{
size_t a = 0;
/* First try to set the new frame buffer size. */
NULL,
0,
sizeof(cReqSize),
&cReqSize);
/* Check if it really was set. */
0,
&cSize,
return err;
/* If both sizes are the same, we are done. */
return noErr;
/* If not we have to check the limits of the device. First get the size of
the buffer size range property. */
0,
&cSize,
NULL);
return err;
{
0,
&cSize,
pRange);
{
for (a=0; a < cSize/sizeof(AudioValueRange); ++a)
{
/* Search for the absolute minimum. */
|| cMin == -1)
/* Search for the best maximum which isn't bigger than
cReqSize. */
{
}
}
if (cMax == -1)
/* First try to set the new frame buffer size. */
NULL,
0,
sizeof(cReqSize),
&cReqSize);
/* Check if it really was set. */
0,
&cSize,
}
}
else
return notEnoughMemoryErr;
return err;
}
/* NOTE(review): signature missing. Judging from the surviving body this
* queries a boolean device property via an AudioDeviceGetProperty-style call
* (likely "is the device running?") and reports it as a bool; the early
* return on error and the property selector are missing -- confirm against
* the original revision. */
{
0,
0,
&uSize,
&uFlag);
if (err != kAudioHardwareNoError)
return uFlag >= 1;
}
/*******************************************************************************
*
* Global structures section
*
******************************************************************************/
/* Backend configuration; cBufferFrames is user tunable via the
* coreaudio_options table at the bottom of the file.
* NOTE(review): the initializer value is missing from this extraction (the
* braces below are empty) -- presumably it defaulted to "use the device
* default"; confirm against the original revision. */
struct
{
int cBufferFrames;
} conf =
{
};
/* Per-stream context for an output voice.
* NOTE(review): every member declaration was lost in this extraction; the
* surviving comments indicate the VBox HW voice struct, the device and
* selected AudioStreamBasicDescriptions, the AudioDeviceID, the AudioUnit
* and the playback transfer ring buffer -- restore from the original. */
typedef struct caVoiceOut
{
/* HW voice output structure defined by VBox */
/* Stream description which is default on the device */
/* Stream description which is selected for using by VBox */
/* The audio device ID of the currently used device */
/* The AudioUnit used */
/* A ring buffer for transferring data to the playback thread */
} caVoiceOut;
/* Per-stream context for an input voice.
* NOTE(review): every member declaration was lost in this extraction; the
* surviving comments indicate the VBox HW voice struct, the two stream
* descriptions, the AudioDeviceID, the AudioUnit, an optional
* AudioConverter plus its read position, the device/stream sample rate
* ratio, a render scratch buffer and the recording transfer ring buffer. */
typedef struct caVoiceIn
{
/* HW voice input structure defined by VBox */
/* Stream description which is default on the device */
/* Stream description which is selected for using by VBox */
/* The audio device ID of the currently used device */
/* The AudioUnit used */
/* The audio converter if necessary */
/* A temporary position value used in the caConverterCallback function */
/* The ratio between the device & the stream sample rate */
/* An extra buffer used for render the audio data in the recording thread */
/* A ring buffer for transferring data from the recording thread */
} caVoiceIn;
/* Error code which indicates "End of data" */
/*******************************************************************************
*
* CoreAudio output section
*
******************************************************************************/
/* callback to feed audio output buffer */
/* NOTE(review): this is the AudioUnit render callback for playback; it runs
* on CoreAudio's own thread and drains the lock-free ring buffer into the
* supplied core audio buffer. The signature (only one parameter line
* survives), the local declarations (csAvail, csToRead, cbToRead, csReads)
* and the copy/release statements are missing from this extraction. */
const AudioTimeStamp* inTimeStamp,
{
/* How much space is used in the ring buffer? */
/* How much space is available in the core audio buffer. Use the smaller
* size of the too. */
Log2(("CoreAudio: [Output] Start reading buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
/* Iterate as long as data is available */
{
/* How much is left? */
/* Try to aquire the necessary block from the ring buffer. */
/* How much to we get? */
Log2(("CoreAudio: [Output] There are %RU32 samples (%RU32 bytes) available\n", csToRead, cbToRead));
/* Break if nothing is used anymore. */
if (RT_UNLIKELY(cbToRead == 0))
break;
/* Copy the data from our ring buffer to the core audio buffer. */
/* Release the read buffer, so it could be used for new data. */
/* How much have we reads so far. */
}
/* Write the bytes to the core audio buffer which where really written. */
Log2(("CoreAudio: [Output] Finished reading buffer with %RU32 samples (%RU32 bytes)\n", csReads, csReads << caVoice->hw.info.shift));
return noErr;
}
/* NOTE(review): signature missing. This is the playback "run" routine that
* moves mixed samples from the HW voice mix buffer into the ring buffer for
* the render thread; it returns the number of samples processed. The local
* declarations (csAvail, csToWrite, cbToWrite, csWritten) and the
* acquire/copy/release statements are missing from this extraction. */
{
/* How much space is available in the ring buffer */
/* How much data is availabe. Use the smaller size of the too. */
Log2(("CoreAudio: [Output] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << hw->info.shift));
/* Iterate as long as data is available */
{
/* How much is left? Split request at the end of our samples buffer. */
/* Try to aquire the necessary space from the ring buffer. */
/* How much to we get? */
Log2(("CoreAudio: [Output] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
/* Break if nothing is free anymore. */
if (RT_UNLIKELY(cbToWrite == 0))
break;
/* Copy the data from our mix buffer to the ring buffer. */
/* Release the ring buffer, so the read thread could start reading this data. */
/* How much have we written so far. */
}
Log2(("CoreAudio: [Output] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << hw->info.shift));
/* Return the count of samples we have processed. */
return csWritten;
}
/* NOTE(review): empty function fragment -- most likely coreaudio_write,
* which in the other audio backends just forwards to audio_pcm_sw_write. */
{
}
/* NOTE(review): signature missing. Output voice control entry point handling
* VOICE_ENABLE/VOICE_DISABLE; the running-state checks and the
* AudioOutputUnitStart/AudioOutputUnitStop (and presumably AudioUnitReset)
* calls are missing from this extraction. Returns 0 on success, -1 on
* failure. */
{
switch (cmd)
{
case VOICE_ENABLE:
{
/* Only start the device if it is actually stopped */
{
{
return -1;
}
}
break;
}
case VOICE_DISABLE:
{
/* Only stop the device if it is actually running */
{
{
return -1;
}
0);
{
return -1;
}
}
break;
}
}
return 0;
}
/* NOTE(review): signature missing -- this is the output voice init routine.
* It resolves the default output device, tunes its frame buffer size, opens
* the HAL output AudioUnit, wires the render callback, negotiates stream
* formats, initializes the unit and creates the transfer ring buffer.
* Most API call sites survive only as trailing argument lines; locals
* (err, uSize, name, cFrames, cd, cp, cb, uFlag) are missing. Returns 0 on
* success, -1 on any failure. */
{
const char *pszName;
/* Initialize the hardware info section with the audio settings */
/* Fetch the default audio output device currently in use */
&uSize,
&caVoice->audioDeviceId);
{
return -1;
}
/* Try to get the name of the default output device and log it. It's not
* fatal if it fails. */
uSize = sizeof(CFStringRef);
0,
0,
&uSize,
&name);
{
if (pszName)
}
else
/* Get the default frames buffer size, so that we can setup our internal
* buffers. */
0,
false,
&uSize,
&cFrames);
{
return -1;
}
/* NOTE(review): the first half of the comment ending below ("... set the
* frame buffer size to the requested one & get the actual size back from
* the device", presumably) was lost in this extraction. */
the device. */
false,
&cFrames);
{
return -1;
}
cd.componentFlags = 0;
cd.componentFlagsMask = 0;
/* Try to find the default HAL output component. */
if (RT_UNLIKELY(cp == 0))
{
LogRel(("CoreAudio: [Output] Failed to find HAL output component\n"));
return -1;
}
/* Open the default HAL output component. */
{
return -1;
}
/* Switch the I/O mode for output to on. */
uFlag = 1;
0,
&uFlag,
sizeof(uFlag));
{
return -1;
}
/* Set the default audio output device as the device for the new AudioUnit. */
0,
sizeof(caVoice->audioDeviceId));
{
return -1;
}
/* CoreAudio will inform us on a second thread when it needs more data for
* output. Therefor register an callback function which will provide the new
* data. */
0,
&cb,
sizeof(cb));
{
return -1;
}
/* Set the quality of the output render to the maximum. */
/* uFlag = kRenderQuality_High;*/
/* err = AudioUnitSetProperty(caVoice->audioUnit,*/
/* kAudioUnitProperty_RenderQuality,*/
/* kAudioUnitScope_Global,*/
/* 0,*/
/* &uFlag,*/
/* sizeof(uFlag));*/
/* Not fatal */
/* if (RT_UNLIKELY(err != noErr))*/
/* LogRel(("CoreAudio: [Output] Failed to set the render quality to the maximum (%RI32)\n", err));*/
/* Fetch the current stream format of the device. */
0,
&uSize);
{
return -1;
}
/* Create an AudioStreamBasicDescription based on the audio settings of
* VirtualBox. */
#if DEBUG
#endif /* DEBUG */
/* Set the device format description for the stream. */
0,
sizeof(caVoice->streamFormat));
{
return -1;
}
0,
&uSize);
{
return -1;
}
#if DEBUG
/* NOTE(review): this call only compiles in DEBUG builds (the function is
* defined under #if DEBUG) yet appears unguarded here -- most likely the
* surrounding #if DEBUG guard was lost in this extraction; verify. */
#endif
caDebugOutputAudioStreamBasicDescription("CoreAudio: [Output] device again", &caVoice->deviceFormat);
/* Also set the frame buffer size off the device on our AudioUnit. This
should make sure that the frames count which we receive in the render
thread is as we like. */
0,
&cFrames,
sizeof(cFrames));
{
LogRel(("CoreAudio: [Output] Failed to set maximum frame buffer size on the AudioUnit (%RI32)\n", err));
return -1;
}
/* Finally initialize the new AudioUnit. */
{
return -1;
}
/* There are buggy devices (e.g. my bluetooth headset) which doesn't honor
* the frame buffer size set in the previous calls. So finally get the
* frame buffer size after the AudioUnit was initialized. */
0,
&cFrames,
&uSize);
{
LogRel(("CoreAudio: [Output] Failed to get maximum frame buffer size from the AudioUnit (%RI32)\n", err));
return -1;
}
/* Create the internal ring buffer. */
{
LogRel(("CoreAudio: [Output] Failed to create internal ring buffer\n"));
return -1;
}
return 0;
}
/* NOTE(review): signature missing -- output voice teardown. Only the nested
* success/else skeleton survives; the AudioOutputUnitStop,
* AudioUnitUninitialize, CloseComponent and ring buffer destroy calls (with
* their LogRel error reporting in the else branches) are missing from this
* extraction. */
{
int rc = 0;
{
{
{
}
else
}
else
}
else
}
/*******************************************************************************
*
* CoreAudio input section
*
******************************************************************************/
/* callback to convert audio input data from one format to another */
/* NOTE(review): this is the AudioConverter input data proc; it feeds
* previously rendered device samples (tracked via a read position on the
* voice, advanced in the else branch) into the converter and reports
* caConverterEOFDErr once drained. All parameters except inUserData and the
* statements that set up the supplied buffer are missing from this
* extraction. */
void *inUserData)
{
/* In principle we had to check here if the source is non interleaved & if
* so go through all buffers not only the first one like now. */
/* Log2(("converting .... ################ %RU32 %RU32 %RU32 %RU32 %RU32\n", *ioNumberDataPackets, bufferList->mBuffers[i].mNumberChannels, bufferList->mNumberBuffers, bufferList->mBuffers[i].mDataByteSize, ioData->mNumberBuffers));*/
/* Use the lower one of the packets to process & the available packets in
* the buffer */
/* Set the new size on output, so the caller know what we have processed. */
/* If no data is available anymore we return with an error code. This error
* code will be returned from AudioConverterFillComplexBuffer. */
if (*ioNumberDataPackets == 0)
{
return caConverterEOFDErr;
}
else
{
/* Log2(("converting .... ################ %RU32 %RU32\n", size, caVoice->rpos));*/
}
return noErr;
}
/* callback to feed audio input buffer */
/* NOTE(review): AudioUnit input callback, running on CoreAudio's recording
* thread. It renders the captured frames into the voice's scratch buffer
* list, then moves them into the transfer ring buffer -- through the
* AudioConverter when device and stream formats differ, by plain memcpy
* otherwise. The signature (one parameter line survives), the
* AudioUnitRender calls and locals (err, csAvail, csToWrite, cbToWrite,
* csWritten, pcDst, tmpList) are missing from this extraction. */
const AudioTimeStamp* inTimeStamp,
{
/* If nothing is pending return immediately. */
if (inNumberFrames == 0)
return noErr;
/* Are we using an converter? */
{
/* Firstly render the data as usual */
caVoice->bufferList.mBuffers[0].mDataByteSize = caVoice->deviceFormat.mBytesPerFrame * inNumberFrames;
&caVoice->bufferList);
{
return err;
}
/* How much space is free in the ring buffer? */
/* How much space is used in the core audio buffer. Use the smaller size of
* the too. */
csAvail = RT_MIN(csAvail, (uint32_t)((caVoice->bufferList.mBuffers[0].mDataByteSize / caVoice->deviceFormat.mBytesPerFrame) * caVoice->sampleRatio));
Log2(("CoreAudio: [Input] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
/* Initialize the temporary output buffer */
/* Set the read position to zero. */
/* Iterate as long as data is available */
{
/* How much is left? */
/* Try to acquire the necessary space from the ring buffer. */
/* How much to we get? */
Log2(("CoreAudio: [Input] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
/* Break if nothing is free anymore. */
if (RT_UNLIKELY(cbToWrite == 0))
break;
/* Now set how much space is available for output */
/* Set our ring buffer as target. */
&tmpList,
NULL);
&& err != caConverterEOFDErr)
{
Log(("CoreAudio: [Input] Failed to convert audio data (%RI32:%c%c%c%c)\n", err, RT_BYTE4(err), RT_BYTE3(err), RT_BYTE2(err), RT_BYTE1(err)));
break;
}
/* Check in any case what processed size is returned. It could be
* much littler than we expected. */
/* Release the ring buffer, so the main thread could start reading this data. */
/* If the error is "End of Data" it means there is no data anymore
* which could be converted. So end here now. */
if (err == caConverterEOFDErr)
break;
}
/* Cleanup */
Log2(("CoreAudio: [Input] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << caVoice->hw.info.shift));
}
else
{
caVoice->bufferList.mBuffers[0].mDataByteSize = caVoice->streamFormat.mBytesPerFrame * inNumberFrames;
&caVoice->bufferList);
{
return err;
}
/* How much space is free in the ring buffer? */
/* How much space is used in the core audio buffer. Use the smaller size of
* the too. */
Log2(("CoreAudio: [Input] Start writing buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
/* Iterate as long as data is available */
{
/* How much is left? */
/* Try to aquire the necessary space from the ring buffer. */
/* How much to we get? */
Log2(("CoreAudio: [Input] There is space for %RU32 samples (%RU32 bytes) available\n", csToWrite, cbToWrite));
/* Break if nothing is free anymore. */
if (RT_UNLIKELY(cbToWrite == 0))
break;
/* Copy the data from the core audio buffer to the ring buffer. */
memcpy(pcDst, (char*)caVoice->bufferList.mBuffers[0].mData + (csWritten << caVoice->hw.info.shift), cbToWrite);
/* Release the ring buffer, so the main thread could start reading this data. */
}
/* Cleanup */
Log2(("CoreAudio: [Input] Finished writing buffer with %RU32 samples (%RU32 bytes)\n", csWritten, csWritten << caVoice->hw.info.shift));
}
return err;
}
/* NOTE(review): signature missing. Input "run" routine that drains the
* recording ring buffer into the HW voice mix buffer and returns the number
* of samples read. Locals (csAvail, csToRead, cbToRead, csReads) and the
* acquire/copy/release statements are missing from this extraction. */
{
char *pcSrc;
/* How much space is used in the ring buffer? */
/* How much space is available in the mix buffer. Use the smaller size of
* the too. */
Log2(("CoreAudio: [Input] Start reading buffer with %RU32 samples (%RU32 bytes)\n", csAvail, csAvail << caVoice->hw.info.shift));
/* Iterate as long as data is available */
{
/* How much is left? Split request at the end of our samples buffer. */
/* Try to aquire the necessary block from the ring buffer. */
/* How much to we get? */
/* Break if nothing is used anymore. */
if (cbToRead == 0)
break;
/* Copy the data from our ring buffer to the mix buffer. */
/* Release the read buffer, so it could be used for new data. */
/* How much have we reads so far. */
}
Log2(("CoreAudio: [Input] Finished reading buffer with %RU32 samples (%RU32 bytes)\n", csReads, csReads << caVoice->hw.info.shift));
return csReads;
}
/* NOTE(review): empty function fragment -- most likely coreaudio_read,
* which in the other audio backends just forwards to audio_pcm_sw_read. */
{
}
/* NOTE(review): signature missing. Input voice control entry point handling
* VOICE_ENABLE/VOICE_DISABLE; the running-state checks and the
* AudioOutputUnitStart/AudioOutputUnitStop (and presumably AudioUnitReset)
* calls are missing from this extraction. Returns 0 on success, -1 on
* failure. */
{
switch (cmd)
{
case VOICE_ENABLE:
{
/* Only start the device if it is actually stopped */
{
}
{
return -1;
}
break;
}
case VOICE_DISABLE:
{
/* Only stop the device if it is actually running */
{
{
return -1;
}
0);
{
return -1;
}
}
break;
}
}
return 0;
}
/* NOTE(review): signature missing -- input voice init routine. Mirrors the
* output init: resolves the default input device, tunes its frame buffer
* size, opens the HAL component with input enabled and output disabled,
* wires the recording callback, sets up an AudioConverter (plus optional
* mono->stereo channel map) when device and requested formats differ,
* initializes the unit and creates the recording ring buffer. Most API call
* sites survive only as trailing argument lines; locals (err, uSize, name,
* cFrames, cd, cp, cb, uFlag, channelMap) are missing. */
{
int rc = -1;
const char *pszName;
/* Initialize the hardware info section with the audio settings */
/* Fetch the default audio input device currently in use */
&uSize,
&caVoice->audioDeviceId);
{
return -1;
}
/* Try to get the name of the default input device and log it. It's not
* fatal if it fails. */
uSize = sizeof(CFStringRef);
0,
1,
&uSize,
&name);
{
if (pszName)
}
else
/* Get the default frames buffer size, so that we can setup our internal
* buffers. */
0,
true,
&uSize,
&cFrames);
{
return -1;
}
/* NOTE(review): the first half of the comment ending below was lost in
* this extraction (see the matching output-side call). */
the device. */
true,
&cFrames);
{
return -1;
}
cd.componentFlags = 0;
cd.componentFlagsMask = 0;
/* Try to find the default HAL output component. */
if (RT_UNLIKELY(cp == 0))
{
LogRel(("CoreAudio: [Input] Failed to find HAL output component\n"));
return -1;
}
/* Open the default HAL output component. */
{
return -1;
}
/* Switch the I/O mode for input to on. */
uFlag = 1;
1,
&uFlag,
sizeof(uFlag));
{
return -1;
}
/* Switch the I/O mode for output to off. This is important, as this is a
* pure input stream. */
uFlag = 0;
0,
&uFlag,
sizeof(uFlag));
{
return -1;
}
/* Set the default audio input device as the device for the new AudioUnit. */
0,
sizeof(caVoice->audioDeviceId));
{
return -1;
}
/* CoreAudio will inform us on a second thread for new incoming audio data.
* Therefor register an callback function, which will process the new data.
* */
0,
&cb,
sizeof(cb));
{
return -1;
}
/* Fetch the current stream format of the device. */
1,
&uSize);
{
return -1;
}
/* Create an AudioStreamBasicDescription based on the audio settings of
* VirtualBox. */
#if DEBUG
#endif /* DEBUG */
/* If the frequency of the device is different from the requested one we
* need a converter. The same count if the number of channels is different. */
{
{
return -1;
}
{
/* If the channel count is different we have to tell this the converter
and supply a channel mapping. For now we only support mapping
from mono to stereo. For all other cases the core audio defaults
are used, which means dropping additional channels in most
cases. */
sizeof(channelMap),
{
LogRel(("CoreAudio: [Input] Failed to add a channel mapper to the audio converter (%RI32)\n", err));
return -1;
}
}
/* Set sample rate converter quality to maximum */
/* uFlag = kAudioConverterQuality_Max;*/
/* err = AudioConverterSetProperty(caVoice->converter,*/
/* kAudioConverterSampleRateConverterQuality,*/
/* sizeof(uFlag),*/
/* &uFlag);*/
/* Not fatal */
/* if (RT_UNLIKELY(err != noErr))*/
/* LogRel(("CoreAudio: [Input] Failed to set the audio converter quality to the maximum (%RI32)\n", err));*/
Log(("CoreAudio: [Input] Converter in use\n"));
/* Set the new format description for the stream. */
1,
sizeof(caVoice->deviceFormat));
{
return -1;
}
1,
sizeof(caVoice->deviceFormat));
{
return -1;
}
}
else
{
/* Set the new format description for the stream. */
1,
sizeof(caVoice->streamFormat));
{
return -1;
}
}
/* Also set the frame buffer size off the device on our AudioUnit. This
should make sure that the frames count which we receive in the render
thread is as we like. */
1,
&cFrames,
sizeof(cFrames));
{
LogRel(("CoreAudio: [Input] Failed to set maximum frame buffer size on the AudioUnit (%RI32)\n", err));
return -1;
}
/* Finally initialize the new AudioUnit. */
{
return -1;
}
1,
&uSize);
{
return -1;
}
/* There are buggy devices (e.g. my bluetooth headset) which doesn't honor
* the frame buffer size set in the previous calls. So finally get the
* frame buffer size after the AudioUnit was initialized. */
0,
&cFrames,
&uSize);
{
LogRel(("CoreAudio: [Input] Failed to get maximum frame buffer size from the AudioUnit (%RI32)\n", err));
return -1;
}
/* Calculate the ratio between the device and the stream sample rate. */
/* Set to zero first */
/* Create the AudioBufferList structure with one buffer. */
/* Initialize the buffer to nothing. */
/* Make sure that the ring buffer is big enough to hold the recording
* data. Compare the maximum frames per slice value with the frames
* necessary when using the converter where the sample rate could differ.
* The result is always multiplied by the channels per frame to get the
* samples count. */
(cFrames * caVoice->deviceFormat.mBytesPerFrame * caVoice->sampleRatio) / caVoice->streamFormat.mBytesPerFrame)
/* Create the internal ring buffer. */
rc = 0;
else
LogRel(("CoreAudio: [Input] Failed to create internal ring buffer\n"));
if (rc != 0)
{
}
/* NOTE(review): returning 0 unconditionally here reports success even when
* rc != 0 and the cleanup path above ran -- looks like a bug or a lost
* "return rc;"; confirm against the original revision. */
return 0;
}
/* NOTE(review): signature missing -- input voice teardown. Only the nested
* success/else skeleton survives; the converter dispose, AudioOutputUnitStop,
* AudioUnitUninitialize, CloseComponent and ring buffer destroy calls (with
* their LogRel error reporting in the else branches) are missing from this
* extraction. */
{
int rc = 0;
{
{
{
}
else
}
else
}
else
}
/*******************************************************************************
*
* CoreAudio global section
*
******************************************************************************/
static void *coreaudio_audio_init(void)
{
return &conf;
}
/** Audio backend teardown entry point.
 * The CoreAudio backend keeps no global state, so there is nothing to
 * release; the opaque configuration pointer is intentionally unused. */
static void coreaudio_audio_fini(void *opaque)
{
(void) opaque; /* unused */
}
/* User visible configuration options for this backend.
* NOTE(review): the option name/type/value fields of the entry (and the
* terminating empty entry) are missing from this extraction; only the help
* text for the buffer-frames tunable survives. */
static struct audio_option coreaudio_options[] =
{
"Size of the buffer in frames", NULL, 0},
};
/* PCM operation table wiring the init/fini/run/ctl functions above into the
* generic audio layer.
* NOTE(review): all INIT_FIELD entries are missing from this extraction. */
static struct audio_pcm_ops coreaudio_pcm_ops =
{
};
/* Driver registration record picked up by the VBox audio subsystem.
* NOTE(review): all fields other than the description (name, options,
* voice sizes, ops pointer, ...) are missing from this extraction. */
struct audio_driver coreaudio_audio_driver =
{
INIT_FIELD(descr =)
"CoreAudio http://developer.apple.com/audio/coreaudio.html",
};