/* DrvHostCoreAudio.cpp revision 1705f7565ed8533058b8541d72d6c5d4453de00f */
/* $Id$ */
/** @file
* VBox audio devices: Mac OS X CoreAudio audio driver.
*/
/*
* Copyright (C) 2010-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#include "DrvAudio.h"
#include "AudioMixBuffer.h"
#include "vl_vbox.h"
#include <CoreAudio/CoreAudio.h>
#include <CoreServices/CoreServices.h>
#include <AudioUnit/AudioUnit.h>
#include <AudioToolbox/AudioConverter.h>
#ifdef LOG_GROUP
#endif
#define LOG_GROUP LOG_GROUP_DEV_AUDIO
/* TODO:
*/
/*
* Most of this is based on:
*/
/**
 * Host Coreaudio driver instance data.
 * @implements PDMIAUDIOCONNECTOR
 */
/* NOTE(review): the struct's member declarations and the closing
 * '} DRVHOSTCOREAUDIO;' are missing from this extraction; only the member
 * doc comments survive. Confirm layout against the full source. */
typedef struct DRVHOSTCOREAUDIO
{
/** Pointer to the driver instance structure. */
/** Pointer to host audio interface. */
/*******************************************************************************
*
* Helper function section
*
******************************************************************************/
#ifdef DEBUG
/**
 * Debug helper: logs the contents of an AudioStreamBasicDescription.
 *
 * @param pszDesc      Prefix/description for the log output.
 * @param pStreamDesc  Stream description to dump.
 *
 * NOTE(review): the format-flag tests that presumably guarded each Log()
 * call are missing from this extraction, so the flag names appear to be
 * logged unconditionally here -- confirm against the full source.
 */
static void drvHostCoreAudioPrintASBDesc(const char *pszDesc, const AudioStreamBasicDescription *pStreamDesc)
{
char pszSampleRate[32];
Log((" Float"));
Log((" BigEndian"));
Log((" SignedInteger"));
Log((" Packed"));
Log((" AlignedHigh"));
Log((" NonInterleaved"));
Log((" NonMixable"));
Log((" AllClear"));
Log(("\n"));
snprintf(pszSampleRate, 32, "%.2f", (float)pStreamDesc->mSampleRate); /** @todo r=andy Use RTStrPrint*. */
}
#endif /* DEBUG */
/**
 * Fills an AudioStreamBasicDescription from PDM PCM stream properties.
 * NOTE(review): the body is truncated -- the 'if (fSigned)' has no
 * dependent statement and the field assignments are missing, so this
 * does not compile as shown.
 */
static void drvHostCoreAudioPCMInfoToASBDesc(PDMPCMPROPS *pPcmProperties, AudioStreamBasicDescription *pStreamDesc)
{
if (pPcmProperties->fSigned)
}
/**
 * Tries to set the device's frame buffer size to @a cReqSize, clamping to
 * the device's supported range if needed, and reports the size actually in
 * effect via @a pcActSize.
 *
 * @returns OSStatus error code from the Core Audio property calls.
 * NOTE(review): the declarations of propAdr, pRange, cMin and cMax as well
 * as the range-scanning loop bodies are missing from this extraction.
 */
static OSStatus drvHostCoreAudioSetFrameBufferSize(AudioDeviceID deviceID, bool fInput, UInt32 cReqSize, UInt32 *pcActSize)
{
/* First try to set the new frame buffer size. */
OSStatus err = AudioObjectSetPropertyData(deviceID, &propAdr, NULL, 0, sizeof(cReqSize), &cReqSize);
/* Check if it really was set. */
return err;
/* If both sizes are the same, we are done. */
return noErr;
/* If not we have to check the limits of the device. First get the size of
the buffer size range property. */
return err;
if (pRange)
{
{
{
/* Search for the absolute minimum. */
|| cMin == -1)
/* Search for the best maximum which isn't bigger than cReqSize. */
{
}
}
if (cMax == -1)
/* First try to set the new frame buffer size. */
{
/* Check if it really was set. */
}
}
}
else
return err;
}
/* NOTE(review): the function header is missing from this extraction. It
 * queries kAudioDevicePropertyDeviceIsRunning, so presumably this is the
 * "is device running" helper returning a bool; the declarations of err,
 * uFlag and the property-read call are absent -- confirm against source. */
{
AudioObjectPropertyAddress propAdr = { kAudioDevicePropertyDeviceIsRunning, kAudioObjectPropertyScopeGlobal,
if (err != kAudioHardwareNoError)
return (uFlag >= 1);
}
/* NOTE(review): function header missing. It returns a C string through
 * *ppszString with VINF_SUCCESS/VERR_NOT_FOUND, so presumably this is a
 * CFString-to-C-string conversion helper -- confirm against full source. */
{
{
return VERR_NOT_FOUND;
}
*ppszString = pszResult;
return VINF_SUCCESS;
}
/* NOTE(review): function header missing. Translates a device UID string to
 * an AudioDeviceID via kAudioHardwarePropertyDeviceForUID and returns
 * kAudioDeviceUnknown on error; the local declarations (translation, uSize,
 * deviceID) are missing from this extraction. */
{
/* Create a CFString out of our CString. */
/* Fill the translation structure. */
/* Fetch the translation from the UID to the device ID. */
AudioObjectPropertyAddress propAdr = { kAudioHardwarePropertyDeviceForUID, kAudioObjectPropertyScopeGlobal,
OSStatus err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propAdr, 0, NULL, &uSize, &translation);
/* Release the temporary CFString */
return deviceID;
/* Return the unknown device on error. */
return kAudioDeviceUnknown;
}
/*******************************************************************************
*
* Global structures section
*
******************************************************************************/
/* Initialization status indicator used for the recreation of the AudioUnits. */
/* Error code which indicates "End of data" */
/* NOTE(review): most member declarations and the closing
 * '} COREAUDIOSTREAMOUT;' are missing from this extraction; only the
 * member doc comments and two fields survive. */
typedef struct COREAUDIOSTREAMOUT
{
/** Host stream out. */
/* Stream description which is default on the device */
/* Stream description which is selected for using by VBox */
/* The audio device ID of the currently used device */
/* The AudioUnit used */
/* A ring buffer for transferring data to the playback thread. */
/* Temporary buffer for copying over audio data into Core Audio. */
void *pvPCMBuf;
/** Size of the temporary buffer. */
/* Initialization status tracker. Used when some of the device parameters
* or the device itself is changed during the runtime. */
/* NOTE(review): all member declarations and the closing
 * '} COREAUDIOSTREAMIN;' are missing from this extraction; only the
 * member doc comments survive. */
typedef struct COREAUDIOSTREAMIN
{
/** Host stream in. */
/* Stream description which is default on the device */
/* Stream description which is selected for using by VBox */
/* The audio device ID of the currently used device */
/* The AudioUnit used */
/* The audio converter if necessary */
/* A temporary position value used in the caConverterCallback function */
/* The ratio between the device & the stream sample rate */
/* An extra buffer used for render the audio data in the recording thread */
/* A ring buffer for transferring data from the recording thread */
/* Initialization status tracker. Used when some of the device parameters
* or the device itself is changed during the runtime. */
static int drvHostCoreAudioControlIn(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMIN pHstStrmIn, PDMAUDIOSTREAMCMD enmStreamCmd);
static int drvHostCoreAudioControlOut(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMOUT pHstStrmOut, PDMAUDIOSTREAMCMD enmStreamCmd);
static int drvHostCoreAudioReinitOutput(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMOUT pHstStrmOut);
static OSStatus drvHostCoreAudioPlaybackAudioDevicePropertyChanged(AudioObjectID propertyID, UInt32 nAddresses, const AudioObjectPropertyAddress properties[], void *pvUser);
static OSStatus drvHostCoreAudioPlaybackCallback(void *pvUser, AudioUnitRenderActionFlags *pActionFlags, const AudioTimeStamp *pAudioTS, UInt32 uBusID, UInt32 cFrames, AudioBufferList* pBufData);
/* NOTE(review): the first line(s) of the signature are missing. Judging by
 * the registration calls in the init functions below, this is presumably
 * drvHostCoreAudioDefaultDeviceChanged (default-device change listener).
 * The switch's case label(s) and the inner condition lines are also
 * missing from this extraction. */
const AudioObjectPropertyAddress properties[],
void *pvUser)
{
switch (propertyID)
{
{
/* This listener is called on every change of the hardware
* device. So check if the default device has really changed. */
{
{
LogRel(("CoreAudio: Default input device has changed\n"));
/* We move the reinitialization to the next input event.
* This make sure this thread isn't blocked and the
* reinitialization is done when necessary only. */
}
}
break;
}
default:
break;
}
return noErr;
}
/* NOTE(review): function header missing from this extraction; the body
 * only returns VINF_SUCCESS. Identity unknown -- confirm against source. */
{
return VINF_SUCCESS;
}
/* NOTE(review): function header missing from this extraction; the body
 * only returns VINF_SUCCESS. Identity unknown -- confirm against source. */
{
return VINF_SUCCESS;
}
/* Callback for getting notified when some of the properties of an audio device has changed. */
/* NOTE(review): one signature line (presumably 'UInt32 nAddresses,') and
 * the switch's case constants (presumably kAudioDeviceProcessorOverload and
 * kAudioDevicePropertyNominalSampleRate) are missing from this extraction. */
static DECLCALLBACK(OSStatus) drvHostCoreAudioRecordingAudioDevicePropertyChanged(AudioObjectID propertyID,
const AudioObjectPropertyAddress aProperties[],
void *pvUser)
{
switch (propertyID)
{
#ifdef DEBUG
{
LogFunc(("Processor overload detected!\n"));
break;
}
#endif /* DEBUG */
{
LogRel(("CoreAudio: Recording sample rate changed\n"));
/* We move the reinitialization to the next input event.
* This make sure this thread isn't blocked and the
* reinitialization is done when necessary only. */
break;
}
default:
break;
}
return noErr;
}
/* Callback to convert audio input data from one format to another. */
/* NOTE(review): the signature's first lines are missing; per the use in the
 * recording callback below this is drvHostCoreAudioConverterCallback (an
 * AudioConverterComplexInputDataProc). Most of the body statements are
 * absent from this extraction; only the skeleton remains. */
void *pvUser)
{
/** @todo Check incoming pointers. */
/** @todo Check converter ID? */
return noErr;
/** @todo In principle we had to check here if the source is non interleaved, and if so,
* so go through all buffers not only the first one like now. */
/* Use the lower one of the packets to process & the available packets in the buffer. */
/* Set the new size on output, so the caller know what we have processed. */
/* If no data is available anymore we return with an error code. This error code will be returned
* from AudioConverterFillComplexBuffer. */
if (*pcPackets == 0)
{
}
else
{
}
return err;
}
/* Callback to feed audio input buffer. */
/* NOTE(review): the signature's first lines are missing (per the forward
 * declarations this is the AURenderCallback used as the input callback).
 * Renders captured frames via AudioUnitRender, then either converts them
 * through AudioConverterFillComplexBuffer (converter path) or copies them
 * straight into the stream's ring buffer. Many local declarations
 * (pStreamIn, err, cbToWrite, acquire/release calls) are missing from
 * this extraction. */
const AudioTimeStamp *pAudioTS,
{
return noErr;
/* If nothing is pending return immediately. */
if (cFrames == 0)
return noErr;
int rc = VINF_SUCCESS;
do
{
/* Are we using a converter? */
{
/* First, render the data as usual. */
pStreamIn->bufferList.mBuffers[0].mData = RTMemAlloc(pStreamIn->bufferList.mBuffers[0].mDataByteSize);
{
rc = VERR_NO_MEMORY;
break;
}
err = AudioUnitRender(pStreamIn->audioUnit, pActionFlags, pAudioTS, uBusID, cFrames, &pStreamIn->bufferList);
{
break;
}
size_t cbAvail = RT_MIN(RTCircBufFree(pStreamIn->pBuf), pStreamIn->bufferList.mBuffers[0].mDataByteSize);
/* Initialize the temporary output buffer */
/* Iterate as long as data is available. */
while (cbAvail)
{
/* Try to acquire the necessary space from the ring buffer. */
if (!cbToWrite)
break;
/* Now set how much space is available for output. */
/* Set our ring buffer as target. */
err = AudioConverterFillComplexBuffer(pStreamIn->converter, drvHostCoreAudioConverterCallback, pStreamIn,
&& err != caConverterEOFDErr)
{
break;
}
/* Check in any case what processed size is returned. It could be less than we expected. */
/* Release the ring buffer, so the main thread could start reading this data. */
/* If the error is "End of Data" it means there is no data anymore
* which could be converted. So end here now. */
if (err == caConverterEOFDErr)
break;
}
}
else /* No converter being used. */
{
pStreamIn->bufferList.mBuffers[0].mData = RTMemAlloc(pStreamIn->bufferList.mBuffers[0].mDataByteSize);
{
rc = VERR_NO_MEMORY;
break;
}
err = AudioUnitRender(pStreamIn->audioUnit, pActionFlags, pAudioTS, uBusID, cFrames, &pStreamIn->bufferList);
{
break;
}
size_t cbAvail = RT_MIN(RTCircBufFree(pStreamIn->pBuf), pStreamIn->bufferList.mBuffers[0].mDataByteSize);
/* Iterate as long as data is available. */
uint32_t cbWrittenTotal = 0;
while(cbAvail)
{
/* Try to acquire the necessary space from the ring buffer. */
if (!cbToWrite)
break;
/* Copy the data from the core audio buffer to the ring buffer. */
/* Release the ring buffer, so the main thread could start reading this data. */
}
}
{
}
} while (0);
return err;
}
/** @todo Eventually split up this function, as this already is huge! */
/* NOTE(review): the function header is missing; by its body (pStreamIn,
 * input scope, input callback registration) this is the internal input
 * stream initializer. Large parts -- variable declarations, error checks
 * ('if (err != noErr)' lines), ring buffer creation and listener
 * registration calls -- are missing from this extraction, so the control
 * flow shown here is incomplete. Confirm every step against full source. */
{
{
/* Fetch the default audio input device currently in use. */
err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propAdr, 0, NULL, &uSize, &pStreamIn->deviceID);
{
return VERR_NOT_FOUND;
}
}
/*
* Try to get the name of the input device and log it. It's not fatal if it fails.
*/
uSize = sizeof(CFStringRef);
{
char *pszDevName = NULL;
{
/* Get the device' UUID. */
{
{
}
}
}
}
else
/* Get the default frames buffer size, so that we can setup our internal buffers. */
{
LogRel(("CoreAudio: Failed to determine frame buffer size of the audio input device (%RI32)\n", err));
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
err = drvHostCoreAudioSetFrameBufferSize(pStreamIn->deviceID, true /* fInput */, cFrames, &cFrames);
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Try to find the default HAL output component. */
if (cp == 0)
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Open the default HAL output component. */
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Switch the I/O mode for input to on. */
err = AudioUnitSetProperty(pStreamIn->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Switch the I/O mode for input to off. This is important, as this is a pure input stream. */
uFlag = 0;
err = AudioUnitSetProperty(pStreamIn->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Set the default audio input device as the device for the new AudioUnit. */
err = AudioUnitSetProperty(pStreamIn->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/*
* CoreAudio will inform us on a second thread for new incoming audio data.
* Therefor register a callback function which will process the new data.
*/
err = AudioUnitSetProperty(pStreamIn->audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Fetch the current stream format of the device. */
err = AudioUnitGetProperty(pStreamIn->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Create an AudioStreamBasicDescription based on our required audio settings. */
#ifdef DEBUG
#endif /* DEBUG */
/* If the frequency of the device is different from the requested one we
* need a converter. The same count if the number of channels is different. */
{
{
LogRel(("CoreAudio: Failed to create the audio converte(%RI32). Input Format=%d, Output Foramt=%d\n",
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
{
/*
* If the channel count is different we have to tell this the converter
* and supply a channel mapping. For now we only support mapping
* from mono to stereo. For all other cases the core audio defaults
* are used, which means dropping additional channels in most
* cases.
*/
err = AudioConverterSetProperty(pStreamIn->converter, kAudioConverterChannelMap, sizeof(channelMap), channelMap);
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
}
/* Set the new input format description for the stream. */
err = AudioUnitSetProperty(pStreamIn->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
#if 0
/* Set sample rate converter quality to maximum */
#endif
LogRel(("CoreAudio: Input converter is active\n"));
}
/* Set the new output format description for the stream. */
err = AudioUnitSetProperty(pStreamIn->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/*
* Also set the frame buffer size off the device on our AudioUnit. This
* should make sure that the frames count which we receive in the render
* thread is as we like.
*/
err = AudioUnitSetProperty(pStreamIn->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global,
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Finally initialize the new AudioUnit. */
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
err = AudioUnitGetProperty(pStreamIn->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/*
* There are buggy devices (e.g. my Bluetooth headset) which doesn't honor
* the frame buffer size set in the previous calls. So finally get the
* frame buffer size after the AudioUnit was initialized.
*/
err = AudioUnitGetProperty(pStreamIn->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice,kAudioUnitScope_Global,
{
LogRel(("CoreAudio: Failed to get maximum frame buffer size from input audio device (%RI32)\n", err));
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Calculate the ratio between the device and the stream sample rate. */
/* Set to zero first */
/* Create the AudioBufferList structure with one buffer. */
/* Initialize the buffer to nothing. */
/* Make sure that the ring buffer is big enough to hold the recording
* data. Compare the maximum frames per slice value with the frames
* necessary when using the converter where the sample rate could differ.
* The result is always multiplied by the channels per frame to get the
* samples count. */
(cFrames * pStreamIn->deviceFormat.mBytesPerFrame * pStreamIn->sampleRatio) / pStreamIn->streamFormat.mBytesPerFrame)
int rc = VINF_SUCCESS;
#if 0
if ( pHstStrmIn->cSamples != 0
LogRel(("CoreAudio: Warning! After recreation, the CoreAudio ring buffer doesn't has the same size as the device buffer (%RU32 vs. %RU32).\n", cSamples, (uint32_t)pHstStrmIn->cSamples));
/* Create the internal ring buffer. */
rc = 0;
else
LogRel(("CoreAudio: Failed to create internal ring buffer\n"));
#endif
if (RT_FAILURE(rc))
{
return rc;
}
#ifdef DEBUG
LogRel(("CoreAudio: Failed to add the processor overload listener for input stream (%RI32)\n", err));
#endif /* DEBUG */
/* Not fatal. */
LogRel(("CoreAudio: Failed to register sample rate changed listener for input stream (%RI32)\n", err));
if (pcSamples)
return VINF_SUCCESS;
}
/** @todo Eventually split up this function, as this already is huge! */
/* NOTE(review): the function header is missing; by its body (pStreamOut,
 * render callback, pvPCMBuf allocation) this is the internal output stream
 * initializer. As with the input variant, most declarations and the
 * 'if (err != noErr)' guards are missing from this extraction -- the
 * control flow shown here is incomplete. */
{
{
/* Fetch the default audio input device currently in use. */
err = AudioObjectGetPropertyData(kAudioObjectSystemObject, &propAdr, 0, NULL, &uSize, &pStreamOut->deviceID);
{
return VERR_NOT_FOUND;
}
}
/*
* Try to get the name of the input device and log it. It's not fatal if it fails.
*/
uSize = sizeof(CFStringRef);
{
char *pszDevName = NULL;
{
/* Get the device' UUID. */
{
{
}
}
}
}
else
/* Get the default frames buffer size, so that we can setup our internal buffers. */
{
LogRel(("CoreAudio: Failed to determine frame buffer size of the audio output device (%RI32)\n", err));
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
err = drvHostCoreAudioSetFrameBufferSize(pStreamOut->deviceID, false /* fInput */, cFrames, &cFrames);
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Try to find the default HAL output component. */
if (cp == 0)
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Open the default HAL output component. */
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Switch the I/O mode for output to on. */
err = AudioUnitSetProperty(pStreamOut->audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Set the default audio output device as the device for the new AudioUnit. */
err = AudioUnitSetProperty(pStreamOut->audioUnit, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/*
* CoreAudio will inform us on a second thread for new incoming audio data.
* Therefor register a callback function which will process the new data.
*/
err = AudioUnitSetProperty(pStreamOut->audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Fetch the current stream format of the device. */
err = AudioUnitGetProperty(pStreamOut->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Create an AudioStreamBasicDescription based on our required audio settings. */
#ifdef DEBUG
#endif /* DEBUG */
/* Set the new output format description for the stream. */
err = AudioUnitSetProperty(pStreamOut->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
err = AudioUnitGetProperty(pStreamOut->audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/*
* Also set the frame buffer size off the device on our AudioUnit. This
* should make sure that the frames count which we receive in the render
* thread is as we like.
*/
err = AudioUnitSetProperty(pStreamOut->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global,
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/* Finally initialize the new AudioUnit. */
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
/*
* There are buggy devices (e.g. my Bluetooth headset) which doesn't honor
* the frame buffer size set in the previous calls. So finally get the
* frame buffer size after the AudioUnit was initialized.
*/
err = AudioUnitGetProperty(pStreamOut->audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global,
{
LogRel(("CoreAudio: Failed to get maximum frame buffer size from output audio device (%RI32)\n", err));
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
int rc = VINF_SUCCESS;
#if 0
/* Calculate the ratio between the device and the stream sample rate. */
pStreamOut->sampleRatio = pStreamOut->streamFormat.mSampleRate / pStreamOut->deviceFormat.mSampleRate;
/* Set to zero first */
/* Create the AudioBufferList structure with one buffer. */
/* Initialize the buffer to nothing. */
/* Make sure that the ring buffer is big enough to hold the recording
* data. Compare the maximum frames per slice value with the frames
* necessary when using the converter where the sample rate could differ.
* The result is always multiplied by the channels per frame to get the
* samples count. */
(cFrames * pStreamOut->deviceFormat.mBytesPerFrame * pStreamOut->sampleRatio) / pStreamOut->streamFormat.mBytesPerFrame)
if ( pHstStrmIn->cSamples != 0
LogRel(("CoreAudio: Warning! After recreation, the CoreAudio ring buffer doesn't has the same size as the device buffer (%RU32 vs. %RU32).\n", cSamples, (uint32_t)pHstStrmIn->cSamples));
/* Create the internal ring buffer. */
rc = 0;
else
LogRel(("CoreAudio: Failed to create internal ring buffer\n"));
#endif
if (RT_FAILURE(rc))
{
/*if (pStreamOut->pBuf)
RTCircBufDestroy(pStreamOut->pBuf);*/
return rc;
}
#ifdef DEBUG
LogRel(("CoreAudio: Failed to register processor overload listener for output stream (%RI32)\n", err));
#endif /* DEBUG */
/* Not fatal. */
LogRel(("CoreAudio: Failed to register sample rate changed listener for output stream (%RI32)\n", err));
/* Allocate temporary buffer. */
if (!pStreamOut->pvPCMBuf)
rc = VERR_NO_MEMORY;
if (RT_SUCCESS(rc))
{
if (pcSamples)
}
return rc;
}
/* NOTE(review): function header missing from this extraction; the body
 * only returns VINF_SUCCESS. Identity unknown -- confirm against source. */
{
return VINF_SUCCESS;
}
/* NOTE(review): the second signature line (presumably the pcSamplesCaptured
 * parameter) and many body statements -- the ring buffer acquire/release
 * calls, the copy into the mixer buffer, and the cWrittenTotal conversion
 * -- are missing from this extraction. */
static DECLCALLBACK(int) drvHostCoreAudioCaptureIn(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMIN pHstStrmIn,
{
char *pcSrc;
/* Check if the audio device should be reinitialized. If so do it. */
{
if (pcSamplesCaptured)
*pcSamplesCaptured = 0;
return VINF_SUCCESS;
}
int rc = VINF_SUCCESS;
uint32_t cbWrittenTotal = 0;
do
{
while (cbToWrite)
{
/* Try to acquire the necessary block from the ring buffer. */
if (!cbToRead)
{
break;
}
if (RT_FAILURE(rc))
break;
/* Release the read buffer, so it could be used for new data. */
}
}
while (0);
if (RT_SUCCESS(rc))
{
if (cWrittenTotal)
if (pcSamplesCaptured)
}
return rc;
}
/* Callback for getting notified when some of the properties of an audio device has changed. */
/* NOTE(review): one signature line (presumably 'UInt32 nAddresses,') and
 * the case constant (presumably kAudioDeviceProcessorOverload) are missing
 * from this extraction. */
static DECLCALLBACK(OSStatus) drvHostCoreAudioPlaybackAudioDevicePropertyChanged(AudioObjectID propertyID,
const AudioObjectPropertyAddress properties[],
void *pvUser)
{
switch (propertyID)
{
#ifdef DEBUG
{
Log2(("CoreAudio: [Output] Processor overload detected!\n"));
break;
}
#endif /* DEBUG */
default:
break;
}
return noErr;
}
/* Callback to feed audio output buffer. */
/* NOTE(review): the signature's first lines are missing; per the forward
 * declaration above this is drvHostCoreAudioPlaybackCallback (the
 * AURenderCallback). Drains the stream's ring buffer into the Core Audio
 * output buffer; many statements (cbDataAvail/cbToRead declarations,
 * acquire/release calls) are missing from this extraction. */
const AudioTimeStamp *pAudioTS,
{
{
return noErr;
}
/* How much space is used in the ring buffer? */
if (!cbDataAvail)
{
return noErr;
}
while (cbDataAvail)
{
/* Try to acquire the necessary block from the ring buffer. */
/* Break if nothing is used anymore. */
/* Fixed: the original 'if (!cbToRead == 0)' parsed as
 * '(!cbToRead) == 0', i.e. it broke exactly when data WAS acquired --
 * the inverse of the intent stated in the comment above. */
if (!cbToRead)
break;
/* Copy the data from our ring buffer to the core audio buffer. */
/* Release the read buffer, so it could be used for new data. */
/* Move offset. */
}
/* Write the bytes to the core audio buffer which were really written. */
return noErr;
}
/* NOTE(review): the second signature line (presumably the pcSamplesPlayed
 * parameter) and many body statements -- the cLive computation, the mixer
 * buffer read, the ring buffer acquire/release calls -- are missing from
 * this extraction. */
static DECLCALLBACK(int) drvHostCoreAudioPlayOut(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMOUT pHstStrmOut,
{
/* Check if the audio device should be reinitialized. If so do it. */
/* Not much else to do here. */
if (!cLive) /* Not samples to play? Bail out. */
{
if (pcSamplesPlayed)
*pcSamplesPlayed = 0;
return VINF_SUCCESS;
}
int rc = VINF_SUCCESS;
uint32_t cbReadTotal = 0;
do
{
while (cbToRead)
{
if (RT_FAILURE(rc))
break;
/* Try to acquire the necessary space from the ring buffer. */
if (!cbToWrite)
{
break;
}
/* Transfer data into stream's own ring buffer. The playback will operate on this
* own ring buffer separately. */
/* Release the ring buffer, so the read thread could start reading this data. */
cbReadTotal += cbRead;
}
}
while (0);
if (RT_SUCCESS(rc))
{
if (cReadTotal)
if (pcSamplesPlayed)
}
return rc;
}
/* NOTE(review): the second signature line (PDMAUDIOSTREAMCMD enmStreamCmd)
 * and the actual start/stop calls (presumably AudioOutputUnitStart/Stop)
 * as well as the DISABLE/PAUSE case labels are missing from this
 * extraction. */
static DECLCALLBACK(int) drvHostCoreAudioControlOut(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMOUT pHstStrmOut,
{
if (!( uStatus == CA_STATUS_INIT
|| uStatus == CA_STATUS_REINIT))
{
return VINF_SUCCESS;
}
int rc = VINF_SUCCESS;
switch (enmStreamCmd)
{
case PDMAUDIOSTREAMCMD_ENABLE:
{
/* Only start the device if it is actually stopped */
{
{
/* Keep going. */
}
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
}
break;
}
{
/* Only stop the device if it is actually running */
{
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
}
break;
}
default:
break;
}
return rc;
}
/* NOTE(review): the second signature line (PDMAUDIOSTREAMCMD enmStreamCmd)
 * and the actual start/stop calls as well as the DISABLE/PAUSE case labels
 * are missing from this extraction. */
static DECLCALLBACK(int) drvHostCoreAudioControlIn(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMIN pHstStrmIn,
{
if (!( uStatus == CA_STATUS_INIT
|| uStatus == CA_STATUS_REINIT))
{
return VINF_SUCCESS;
}
int rc = VINF_SUCCESS;
switch (enmStreamCmd)
{
case PDMAUDIOSTREAMCMD_ENABLE:
{
/* Only start the device if it is actually stopped */
{
}
{
}
break;
}
{
/* Only stop the device if it is actually running */
{
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
{
return VERR_GENERAL_FAILURE; /** @todo Fudge! */
}
}
break;
}
default:
break;
}
return rc;
}
/* NOTE(review): the teardown statements -- listener deregistration,
 * AudioUnitUninitialize/CloseComponent, converter disposal and ring buffer
 * destruction -- are missing from this extraction; only the error-path
 * skeleton and several dangling 'else' lines remain. */
static DECLCALLBACK(int) drvHostCoreAudioFiniIn(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMIN pHstStrmIn)
{
int rc = 0;
LogFlow(("drvHostCoreAudioFiniIn \n"));
if (!( status == CA_STATUS_INIT
|| status == CA_STATUS_REINIT))
{
return VINF_SUCCESS;
}
if (RT_SUCCESS(rc))
{
#ifdef DEBUG
/* Not Fatal */
#endif /* DEBUG */
/* Not Fatal */
{
}
{
{
}
else
}
else
}
else
return VINF_SUCCESS;
}
/* NOTE(review): the teardown statements and the RTMemFree(pvPCMBuf) call
 * that presumably preceded 'pStreamOut->cbPCMBuf = 0;' are missing from
 * this extraction; only the skeleton remains. */
static DECLCALLBACK(int) drvHostCoreAudioFiniOut(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMOUT pHstStrmOut)
{
if (!( status == CA_STATUS_INIT
|| status == CA_STATUS_REINIT))
{
return VINF_SUCCESS;
}
if (RT_SUCCESS(rc))
{
#if 0
/* Not Fatal */
#endif /* DEBUG */
{
{
if (pStreamOut->pvPCMBuf)
{
pStreamOut->cbPCMBuf = 0;
}
}
else
}
else
}
else
return VINF_SUCCESS;
}
/* NOTE(review): the function header is missing; per the LogFlow text this
 * is drvHostCoreAudioInitIn (PDMIHOSTAUDIO init callback). The call into
 * the internal input initializer that sets 'rc' is missing, and the
 * LogFlow line appears duplicated in this extraction. */
{
int rc = -1;
bool fDeviceByUser = false;
LogFlow(("drvHostCoreAudioInitIn \n"));
LogFlow(("drvHostCoreAudioInitIn \n"));
/* Initialize the hardware info section with the audio settings */
#if 0
/* Try to find the audio device set by the user */
{
/* Not fatal */
LogRel(("CoreAudio: Unable to find input device %s. Falling back to the default audio device. \n", DeviceUID.pszInputDeviceUID));
else
fDeviceByUser = true;
}
#endif
if (RT_FAILURE(rc))
return rc;
/* When the devices isn't forced by the user, we want default device change notifications. */
if (!fDeviceByUser)
{
AudioObjectPropertyAddress propAdr = { kAudioHardwarePropertyDefaultInputDevice, kAudioObjectPropertyScopeGlobal,
drvHostCoreAudioDefaultDeviceChanged, (void *)pStreamIn);
/* Not fatal. */
}
return VINF_SUCCESS;
}
/* NOTE(review): the function header is missing; per the LogFlow text this
 * is drvHostCoreAudioInitOut (PDMIHOSTAUDIO init callback). The call into
 * the internal output initializer that sets 'rc' is missing from this
 * extraction. */
{
int rc = 0;
bool fDeviceByUser = false; /* use we a device which was set by the user? */
LogFlow(("drvHostCoreAudioInitOut\n"));
/* Initialize the hardware info section with the audio settings */
#if 0
/* Try to find the audio device set by the user. Use
* export VBOX_COREAUDIO_OUTPUT_DEVICE_UID=AppleHDAEngineOutput:0
* to set it. */
{
/* Not fatal */
LogRel(("CoreAudio: Unable to find output device %s. Falling back to the default audio device. \n", DeviceUID.pszOutputDeviceUID));
else
fDeviceByUser = true;
}
#endif
if (RT_FAILURE(rc))
return rc;
/* When the devices isn't forced by the user, we want default device change notifications. */
if (!fDeviceByUser)
{
AudioObjectPropertyAddress propAdr = { kAudioHardwarePropertyDefaultOutputDevice, kAudioObjectPropertyScopeGlobal,
drvHostCoreAudioDefaultDeviceChanged, (void *)pStreamOut);
/* Not fatal. */
}
return VINF_SUCCESS;
}
/* NOTE(review): function header missing; by the comment this is presumably
 * the PDMIHOSTAUDIO "is enabled" query -- always reports enabled. */
{
return true; /* Always all enabled. */
}
/**
 * Returns the backend configuration.
 * NOTE(review): the statements filling @a pAudioConf appear to be missing
 * from this extraction; as shown, the structure is never written.
 */
static DECLCALLBACK(int) drvHostCoreAudioGetConf(PPDMIHOSTAUDIO pInterface, PPDMAUDIOBACKENDCFG pAudioConf)
{
return VINF_SUCCESS;
}
/* NOTE(review): function header and body missing from this extraction;
 * likely a no-op shutdown/teardown callback -- confirm against source. */
{
}
/**
 * @interface_method_impl{PDMIBASE,pfnQueryInterface}
 * NOTE(review): the PDMIBASE/PDMIHOSTAUDIO interface-return lines appear to
 * be missing from this extraction; as shown, every query returns NULL.
 */
static DECLCALLBACK(void *) drvHostCoreAudioQueryInterface(PPDMIBASE pInterface, const char *pszIID)
{
return NULL;
}
/* Construct a Core Audio host driver instance.
 * (Review note: the original comment said "DirectSound", evidently a
 * copy-paste leftover from the DSound backend.)
 *
 * @copydoc FNPDMDRVCONSTRUCT
 */
/* NOTE(review): the interface-wiring assignments announced by the
 * 'IBase' / 'IHostAudio' comments are missing from this extraction. */
static DECLCALLBACK(int) drvHostCoreAudioConstruct(PPDMDRVINS pDrvIns, PCFGMNODE pCfg, uint32_t fFlags)
{
LogRel(("Audio: Initializing Core Audio driver\n"));
/*
* Init the static parts.
*/
/* IBase */
/* IHostAudio */
return VINF_SUCCESS;
}
/**
 * Char driver registration record.
 */
/* NOTE(review): the initializer values for u32Version, fFlags, fClass,
 * pfnConstruct and u32EndVersion are missing from this extraction; the
 * aggregate does not compile as shown. */
const PDMDRVREG g_DrvHostCoreAudio =
{
/* u32Version */
/* szName */
"CoreAudio",
/* szRCMod */
"",
/* szR0Mod */
"",
/* pszDescription */
"Core Audio host driver",
/* fFlags */
/* fClass. */
/* cMaxInstances */
~0U,
/* cbInstance */
sizeof(DRVHOSTCOREAUDIO),
/* pfnConstruct */
/* pfnDestruct */
NULL,
/* pfnRelocate */
NULL,
/* pfnIOCtl */
NULL,
/* pfnPowerOn */
NULL,
/* pfnReset */
NULL,
/* pfnSuspend */
NULL,
/* pfnResume */
NULL,
/* pfnAttach */
NULL,
/* pfnDetach */
NULL,
/* pfnPowerOff */
NULL,
/* pfnSoftReset */
NULL,
/* u32EndVersion */
};