nephacks
2025-06-04 03:22:50 +02:00
parent f234f23848
commit f12416cffd
14243 changed files with 6446499 additions and 26 deletions


@@ -0,0 +1,432 @@
//===== Copyright © Valve Corporation, All rights reserved. =================//
//
// Purpose: audio mix data structures
//
//===========================================================================//
#ifndef AUDIO_MIX_H
#define AUDIO_MIX_H
#ifdef _WIN32
#pragma once
#endif
#include "strtools.h" // V_memset
#include "utlstring.h"
#include "utlstringtoken.h"
#include "soundsystem/lowlevel.h"
struct dspglobalvars_t
{
float m_flMixMin; // min dsp mix at close range
float m_flMixMax; // max dsp mix at long range
float m_fldbMixDrop; // reduce mix_min/max by n% if sndlvl of new sound less than dbMin
float m_flDistanceMin; // range at which sounds are mixed at flMixMin
float m_flDistanceMax; // range at which sounds are mixed at flMixMax
uint16 m_ndbMin; // if sndlvl of a new sound is < dbMin, reduce mix_min/max by m_fldbMixDrop
bool m_bIsOff;
};
class CAudioProcessor
{
public:
virtual ~CAudioProcessor() {}
CAudioProcessor( const char *pDebugName, int nChannelCount );
void SetDebugName( const char *pName );
virtual void Process( CAudioMixBuffer *pInput, CAudioMixBuffer *pOutput, int nChannelCount, dspglobalvars_t *pGlobals );
virtual void ProcessSingleChannel( const CAudioMixBuffer &input, CAudioMixBuffer *pOutput, int nChannelIndex ) = 0;
// Parameter set can modify internal or global state (or both)
virtual bool SetControlParameter( CUtlStringToken name, float flValue );
virtual float GetControlParameter( CUtlStringToken name, float flDefaultValue = 0.0f ) = 0;
virtual bool SetNameParameter( CUtlStringToken name, uint32 nNameValue ) = 0;
virtual uint32 GetNameParameter( CUtlStringToken name, uint32 nDefaultValue ) = 0;
virtual bool ShouldProcess();
float GetPrevMix( float flMix );
void ApplyMonoProcessor( CAudioMixBuffer *pInput, CAudioMixBuffer *pOutput, int nOutputChannelCount, float flMix );
void ApplyStereoProcessor( CAudioMixBuffer *pInput, CAudioMixBuffer *pOutput, int nOutputChannelCount, float flMix );
void ApplyNChannelProcessor( CAudioMixBuffer *pInput, CAudioMixBuffer *pOutput, int nChannelCount, float flMix );
CUtlString m_debugName;
uint32 m_nNameHashCode;
float m_flXFade;
float m_flXFadePrev;
float m_flMix;
int m_nChannels;
bool m_bEnabled;
};
struct audio_buffer_input_t
{
const short *m_pSamples; // pointer to the samples themselves (in 8, 16, or 32-bit format)
uint m_nSampleCount; // number of whole samples *not bytes* *not multiplied by channel count* (e.g. a one-second 16-bit 44.1KHz file would be 44100 samples)
};
// UNDONE: deprecate this and move to an 8-bit int / 16-bit int / 32-bit float, N-channel design with specific-channel extraction
enum vaudio_sampleformats_t
{
SAMPLE_INT16_MONO = 0, // default
SAMPLE_INT8_MONO, // increase to 16-bit and convert to float
SAMPLE_INT16_STEREO_L, // stereo wave, extract left channel
SAMPLE_INT16_STEREO_R, // stereo wave, extract right channel
SAMPLE_INT8_STEREO_L, // stereo wave, extract left channel
SAMPLE_INT8_STEREO_R, // stereo wave, extract right channel
SAMPLE_FLOAT32_MONO, // no reformat needed
};
struct audio_source_input_t
{
const audio_buffer_input_t *m_pPackets;
uint m_nSamplingRate;
uint16 m_nPacketCount;
uint16 m_nSampleFormat;
void InitPackets( const audio_buffer_input_t *pPacketsIn, int nPacketCountIn, int nSamplingRate, int nBitsPerSample, int nChannelsPerSample )
{
V_memset( this, 0, sizeof(*this) );
Assert( nPacketCountIn >= 0 && nPacketCountIn <= UINT16_MAX );
m_nPacketCount = (uint16)nPacketCountIn;
m_pPackets = pPacketsIn;
m_nSamplingRate = nSamplingRate;
switch( nBitsPerSample )
{
case 16:
m_nSampleFormat = (uint16)( (nChannelsPerSample == 1) ? SAMPLE_INT16_MONO : SAMPLE_INT16_STEREO_L );
break;
case 8:
m_nSampleFormat = (uint16)( (nChannelsPerSample == 1) ? SAMPLE_INT8_MONO : SAMPLE_INT8_STEREO_L );
break;
}
}
};
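// Usage sketch (illustrative only, not part of the original header): wrapping a single
// 16-bit mono sample buffer as a one-packet source. pSamples/nSampleCount are assumed
// to be supplied by the caller; the 44.1kHz rate is just an example value.
inline void Example_InitMonoSource( audio_source_input_t &source, audio_buffer_input_t &packet, const short *pSamples, uint nSampleCount )
{
	packet.m_pSamples = pSamples;		// raw 16-bit signed mono samples
	packet.m_nSampleCount = nSampleCount;	// whole samples, not bytes
	// one packet, 44100Hz, 16 bits per sample, 1 channel -> SAMPLE_INT16_MONO
	source.InitPackets( &packet, 1, 44100, 16, 1 );
}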
struct audio_source_indexstate_t
{
uint m_nPacketIndex;
uint m_nBufferSampleOffset;
uint m_nSampleFracOffset;
inline void Clear()
{
m_nPacketIndex = 0;
m_nBufferSampleOffset = 0;
m_nSampleFracOffset = 0;
}
};
class CAudioMixState
{
audio_source_input_t *m_pChannelsIn;
audio_source_indexstate_t *m_pChannelsOut;
uint32 m_nInputStride;
uint32 m_nPlaybackStride;
uint32 m_nChannelCount;
dspglobalvars_t *m_pGlobals;
public:
inline void Init( audio_source_input_t *pInput, uint32 nInputStride, audio_source_indexstate_t *pPlayback, uint32 nPlaybackStride, uint32 nCount )
{
m_pChannelsIn = pInput;
m_pChannelsOut = pPlayback;
m_nInputStride = nInputStride;
m_nPlaybackStride = nPlaybackStride;
m_nChannelCount = nCount;
}
void SetDSPGlobals( dspglobalvars_t *pGlobals )
{
m_pGlobals = pGlobals;
}
CAudioMixState( audio_source_input_t *pInput, uint32 nInputStride, audio_source_indexstate_t *pPlayback, uint32 nPlaybackStride, uint32 nCount )
: m_pChannelsIn(pInput), m_pChannelsOut(pPlayback), m_nInputStride(nInputStride), m_nPlaybackStride(nPlaybackStride), m_nChannelCount(nCount), m_pGlobals(nullptr)
{
}
CAudioMixState()
{
m_pChannelsIn = nullptr;
m_pChannelsOut = nullptr;
m_nInputStride = 0;
m_nPlaybackStride = 0;
m_nChannelCount = 0;
m_pGlobals = nullptr;
}
inline void Clear()
{
m_pChannelsIn = NULL;
m_pChannelsOut = NULL;
m_nChannelCount = 0;
}
inline audio_source_input_t *GetInput( int nIndex ) const
{
return (audio_source_input_t *)( (byte *)m_pChannelsIn + nIndex * m_nInputStride );
}
inline audio_source_indexstate_t *GetOutput( int nIndex ) const
{
return (audio_source_indexstate_t *)( (byte *)m_pChannelsOut + nIndex * m_nPlaybackStride );
}
bool IsChannelFinished( int nChannel ) const
{
return ( GetOutput( nChannel )->m_nPacketIndex >= GetInput( nChannel )->m_nPacketCount ) ? true : false;
}
dspglobalvars_t *DSPGlobals() const { return m_pGlobals; }
};
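// Usage sketch (illustrative only, not part of the original header): pointing a mix state
// at parallel arrays of per-channel input and playback-cursor data. The stride is the byte
// distance between consecutive channels, so for plain arrays it is sizeof() of each element.
inline void Example_InitMixState( CAudioMixState &state, audio_source_input_t *pInputs, audio_source_indexstate_t *pCursors, uint32 nChannelCount, dspglobalvars_t *pGlobals )
{
	state.Init( pInputs, sizeof( audio_source_input_t ), pCursors, sizeof( audio_source_indexstate_t ), nChannelCount );
	state.SetDSPGlobals( pGlobals );
}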
enum mix_command_id_t
{
AUDIO_MIX_CLEAR = 0,
AUDIO_MIX_EXTRACT_SOURCE,
AUDIO_MIX_ADVANCE_SOURCE,
AUDIO_MIX_ACCUMULATE,
AUDIO_MIX_ACCUMULATE_RAMP,
AUDIO_MIX_MULTIPLY,
AUDIO_MIX_PROCESS,
AUDIO_MIX_SUM,
AUDIO_MIX_SWAP, // swap two buffers
AUDIO_MIX_MEASURE_DEBUG_LEVEL,
AUDIO_MIX_OUTPUT_LEVEL,
};
struct audio_mix_command_t
{
uint16 m_nCommandId;
uint16 m_nOutput;
uint16 m_nInput0;
uint16 m_nInput1;
float m_flParam0;
float m_flParam1;
void Init( mix_command_id_t cmd, uint16 nOut )
{
m_nCommandId = (uint16)cmd;
m_nOutput = nOut;
m_nInput0 = 0;
m_nInput1 = 0;
m_flParam0 = 0.0f;
m_flParam1 = 0.0f;
}
void Init( mix_command_id_t cmd, uint16 nOut, uint16 nIn0, float flScale )
{
m_nCommandId = (uint16)cmd;
m_nOutput = nOut;
m_nInput0 = nIn0;
m_nInput1 = 0;
m_flParam0 = flScale;
m_flParam1 = 0.0f;
}
void Init( mix_command_id_t cmd, uint16 nOut, uint16 nIn0, uint16 nIn1, float flScale0, float flScale1 )
{
m_nCommandId = (uint16)cmd;
m_nOutput = nOut;
m_nInput0 = nIn0;
m_nInput1 = nIn1;
m_flParam0 = flScale0;
m_flParam1 = flScale1;
}
};
class CAudioMixCommandList
{
public:
inline void ClearBuffer( uint16 nTarget )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_CLEAR, nTarget );
m_commands.AddToTail( cmd );
}
void ClearMultichannel( uint16 nTarget, int nCount );
void ScaleMultichannel( uint16 nTarget, uint16 nInput, int nCount, float flVolume );
inline void ExtractSourceToBuffer( uint16 nTarget, uint16 nChannel, float flVolume, float flPitch )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_EXTRACT_SOURCE, nTarget, nChannel, 0, flVolume, flPitch );
m_commands.AddToTail( cmd );
}
inline void AdvanceSource( uint16 nChannel, float flPitch )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_ADVANCE_SOURCE, 0, nChannel, 0, 0, flPitch );
m_commands.AddToTail( cmd );
}
inline void ProcessBuffer( uint16 nOutput, uint16 nInput, uint16 nProcessor, int nChannelCount )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_PROCESS, nOutput, nInput, nProcessor, float(nChannelCount), 0.0f );
m_commands.AddToTail( cmd );
}
inline uint16 ProcessBuffer( uint16 nOutput, uint16 nInput, int nChannelCount, CAudioProcessor *pProc )
{
uint16 nProcessor = (uint16)m_processors.AddToTail( pProc );
ProcessBuffer( nOutput, nInput, nProcessor, nChannelCount );
return nProcessor;
}
inline void ScaleBuffer( uint16 nOutput, uint16 nInput, float flVolume )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_MULTIPLY, nOutput, nInput, flVolume );
m_commands.AddToTail( cmd );
}
inline void AccumulateToBuffer( uint16 nOutput, uint16 nInput, float flVolume )
{
// if the volume is zero this will have no effect, so it is safe to skip it
if ( flVolume == 0.0f )
return;
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_ACCUMULATE, nOutput, nInput, flVolume );
m_commands.AddToTail( cmd );
}
inline void AccumulateToBufferVolumeRamp( uint16 nOutput, uint16 nInput, float flVolumeStart, float flVolumeEnd )
{
// Too small of a volume change to ramp? Just output without the ramp
// 1e-3f is small enough that we might do it during a normal ramp
// (2e-3f is the slope of a full scale fade at 512 samples per batch)
if ( fabs( flVolumeEnd-flVolumeStart) < 1e-3f )
{
AccumulateToBuffer( nOutput, nInput, flVolumeEnd );
return;
}
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_ACCUMULATE_RAMP, nOutput, nInput, 0, flVolumeStart, flVolumeEnd );
m_commands.AddToTail( cmd );
}
inline void Mix2x1( uint16 nOutput, uint16 nInput0, uint16 nInput1, float flVolume0, float flVolume1 )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_SUM, nOutput, nInput0, nInput1, flVolume0, flVolume1 );
m_commands.AddToTail( cmd );
}
inline void SwapBuffers( uint16 nInput0, uint16 nInput1 )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_SWAP, nInput0, nInput1, 1.0f );
m_commands.AddToTail( cmd );
}
inline void ReadOutputLevel( uint16 nLevelOutput, uint16 nInput0, uint16 nInputChannelCount )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_OUTPUT_LEVEL, nLevelOutput, nInput0, nInputChannelCount, 1.0f, 1.0f );
m_commands.AddToTail( cmd );
}
inline void DebugReadLevel( uint16 nDebugOutput0, uint16 nInput0, uint16 nInputChannelCount )
{
audio_mix_command_t cmd;
cmd.Init( AUDIO_MIX_MEASURE_DEBUG_LEVEL, nDebugOutput0, nInput0, nInputChannelCount, 1.0f, 1.0f );
m_commands.AddToTail( cmd );
}
inline uint16 AddProcessor( CAudioProcessor *pProc )
{
return (uint16)m_processors.AddToTail( pProc );
}
inline void Clear()
{
m_commands.RemoveAll();
m_processors.RemoveAll();
}
void AccumulateMultichannel( uint16 nOutput, int nOutputChannels, uint16 nInput, int nInputChannels, float flInputVolume );
CUtlVectorFixedGrowable<audio_mix_command_t, 256> m_commands;
CUtlVectorFixedGrowable<CAudioProcessor *, 8> m_processors;
};
// This describes the state for each iteration of mixing
// it is the list of all low-level audio operations that need to take place
// in order to produce one buffer of mixed output
class CAudioMixDescription : public CAudioMixCommandList
{
public:
inline void Init( int nChannels )
{
m_nMixBuffersInUse = 0;
m_nMixBufferMax = 0;
m_nDebugOutputCount = 0;
m_nOutputLevelCount = 0;
Clear();
#if USE_VOICE_LAYERS
for ( int i = 0; i < NUM_VOICE_LAYERS; i++ )
{
m_flLayerVolume[i] = 1.0f;
}
#endif
}
// Add a new mix buffer
inline uint16 AllocMixBuffer( uint nCount = 1 )
{
int nOut = m_nMixBuffersInUse;
m_nMixBuffersInUse += nCount;
m_nMixBufferMax = MAX( m_nMixBufferMax, m_nMixBuffersInUse );
return (uint16)nOut;
}
inline void FreeMixBuffer( uint16 nStart, uint nCount = 1 )
{
// we only support freeing from the end of the stack
Assert( nStart + nCount == m_nMixBuffersInUse );
if ( nStart + nCount == m_nMixBuffersInUse )
{
m_nMixBuffersInUse -= nCount;
}
}
inline int AllocDebugOutputs( int nOutputs )
{
int nRet = m_nDebugOutputCount;
m_nDebugOutputCount += nOutputs;
return nRet;
}
inline int AllocOutputLevels( int nOutputs )
{
int nRet = m_nOutputLevelCount;
m_nOutputLevelCount += nOutputs;
return nRet;
}
uint m_nMixBufferMax;
uint m_nMixBuffersInUse;
uint m_nDebugOutputCount;
uint m_nOutputLevelCount;
#if USE_VOICE_LAYERS
float m_flLayerVolume[NUM_VOICE_LAYERS];
#endif
};
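// Usage sketch (illustrative only, not part of the original header): building a trivial
// mix description that decodes one source channel into a scratch buffer and accumulates it
// into a stereo output pair. Buffer indices and volume/pitch values are hypothetical.
inline void Example_BuildSimpleMix( CAudioMixDescription &mix, uint16 nSourceChannel, float flVolume, float flPitch )
{
	mix.Init( 2 );
	uint16 nOutput = mix.AllocMixBuffer( 2 );			// stereo output pair: nOutput, nOutput+1
	uint16 nScratch = mix.AllocMixBuffer();				// mono decode target
	mix.ClearMultichannel( nOutput, 2 );
	mix.ExtractSourceToBuffer( nScratch, nSourceChannel, 1.0f, flPitch );
	mix.AdvanceSource( nSourceChannel, flPitch );
	mix.AccumulateToBuffer( nOutput, nScratch, flVolume );			// left
	mix.AccumulateToBuffer( (uint16)( nOutput + 1 ), nScratch, flVolume );	// right
	uint16 nLevel = (uint16)mix.AllocOutputLevels( 1 );
	mix.ReadOutputLevel( nLevel, nOutput, 2 );
	mix.FreeMixBuffer( nScratch );					// scratch was the last buffer allocated
}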
struct mix_debug_outputs_t
{
uint32 m_nChannelCount;
float m_flLevel;
float m_flChannelLevels[8];
};
// NOTE: This object is large (>64KB); declaring one on the stack may crash some platforms
class CAudioMixResults
{
public:
CUtlVectorFixedGrowable<mix_debug_outputs_t,8> m_debugOutputs;
CUtlVectorFixedGrowable<float, 16> m_flOutputLevels;
CUtlVectorFixedGrowable<CAudioMixBuffer, 32> m_pOutput;
};
extern void ProcessAudioMix( CAudioMixResults *pResults, const CAudioMixState &mixState, CAudioMixDescription &mixSetup );
#endif // AUDIO_MIX_H


@@ -0,0 +1,118 @@
//===== Copyright © 1996-2005, Valve Corporation, All rights reserved. ======//
//
// Purpose:
//
//===========================================================================//
#ifndef ISOUNDSYSTEM_H
#define ISOUNDSYSTEM_H
#ifdef _WIN32
#pragma once
#endif
#include "appframework/iappsystem.h"
//-----------------------------------------------------------------------------
// Forward declarations
//-----------------------------------------------------------------------------
class IAudioDevice;
class CAudioSource;
class CAudioMixer;
//-----------------------------------------------------------------------------
// Sound handle
//-----------------------------------------------------------------------------
typedef unsigned short AudioSourceHandle_t;
enum
{
AUDIOSOURCEHANDLE_INVALID = (AudioSourceHandle_t)~0
};
//-----------------------------------------------------------------------------
// Flags for FindAudioSource
//-----------------------------------------------------------------------------
enum FindAudioSourceFlags_t
{
FINDAUDIOSOURCE_NODELAY = 0x1,
FINDAUDIOSOURCE_PREFETCH = 0x2,
FINDAUDIOSOURCE_PLAYONCE = 0x4,
};
#include "soundsystem/audio_mix.h"
/* filter types */
enum audio_filter_type_t
{
FILTER_LOWPASS = 0, /* low pass filter */
FILTER_HIGHPASS, /* High pass filter */
FILTER_BANDPASS, /* band pass filter */
FILTER_NOTCH, /* Notch Filter */
FILTER_PEAKING_EQ, /* Peaking band EQ filter */
FILTER_LOW_SHELF, /* Low shelf filter */
FILTER_HIGH_SHELF /* High shelf filter */
};
class IAudioMix
{
public:
virtual ~IAudioMix() {}
virtual void Process( CAudioMixState *pState ) = 0;
CAudioMixBuffer *m_pOutput;
int m_nOutputChannelCount;
};
class CAudioMixState;
class CAudioMixDescription;
abstract_class ISoundSystem2
{
public:
// NOTE: This is the new sound device architecture. It is a separate standalone piece of tech that does not interact with ISoundSystem's normal
// entry points. Eventually these entry points will replace many of the entry points of ISoundSystem and this interface will transition to
// the new architecture. This is just here for prototyping and testing.
// NULL chooses default
virtual IAudioDevice2 *CreateDevice( const audio_device_init_params_t *pParams ) = 0;
virtual void DestroyDevice( IAudioDevice2 *pDevice ) = 0;
virtual int EnumerateDevices( int nSubsystem, audio_device_description_t *pDeviceListOut, int nListCount ) = 0;
virtual void HandleDeviceErrors( IAudioDevice2 *pDevice ) = 0;
virtual IAudioMix *CreateMix( const CAudioMixDescription *pMixDescription ) = 0;
virtual void DestroyMix( IAudioMix *pMix ) = 0;
virtual CAudioProcessor *CreateFilter( audio_filter_type_t filterType, float fldbGain, float flCenterFrequency, float flBandWidth ) = 0;
virtual CAudioProcessor *CreateMonoDSP( int nEffect, dspglobalvars_t *pGlobals ) = 0;
};
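// Usage sketch (illustrative only, not part of the original header): creating the default
// output device and a low-pass filter processor. pSoundSystem2 is assumed to be obtained
// from the application's factory; the filter parameter values are arbitrary examples.
inline void Example_CreateDeviceAndFilter( ISoundSystem2 *pSoundSystem2, IAudioDevice2 **ppDevice, CAudioProcessor **ppFilter )
{
	*ppDevice = pSoundSystem2->CreateDevice( NULL );	// NULL selects the default device
	*ppFilter = pSoundSystem2->CreateFilter( FILTER_LOWPASS, 0.0f, 4000.0f, 1.0f );
}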
//-----------------------------------------------------------------------------
// Purpose: DLL interface for low-level sound utilities
//-----------------------------------------------------------------------------
#define SOUNDSYSTEM_INTERFACE_VERSION "SoundSystem001"
abstract_class ISoundSystem : public IAppSystem
{
public:
virtual void Update( float time ) = 0;
virtual void Flush( void ) = 0;
virtual CAudioSource *FindOrAddSound( const char *filename ) = 0;
virtual CAudioSource *LoadSound( const char *wavfile ) = 0;
virtual void PlaySound( CAudioSource *source, float volume, CAudioMixer **ppMixer ) = 0;
virtual bool IsSoundPlaying( CAudioMixer *pMixer ) = 0;
virtual CAudioMixer *FindMixer( CAudioSource *source ) = 0;
virtual void StopAll( void ) = 0;
virtual void StopSound( CAudioMixer *mixer ) = 0;
virtual void GetAudioDevices(CUtlVector< audio_device_description_t >& deviceListOut) const = 0;
};
#endif // ISOUNDSYSTEM_H


@@ -0,0 +1,61 @@
//========= Copyright © 1996-2005, Valve Corporation, All rights reserved. ============//
//
// Purpose: Define the IVoiceCodec interface.
//
// $NoKeywords: $
//=============================================================================//
#ifndef IVOICECODEC_H
#define IVOICECODEC_H
#pragma once
#include "interface.h"
#define BYTES_PER_SAMPLE 2
// This interface is for voice codecs to implement.
// Codecs are guaranteed to be called with the exact output from Compress into Decompress (ie:
// data won't be stuck together and sent to Decompress).
// Decompress is not guaranteed to be called in any specific order relative to Compress, but
// Codecs maintain state between calls, so it is best to call Compress with consecutive voice data
// and decompress likewise. If you call it out of order, it will sound weird.
// In the same vein, calling Decompress twice with the same data is a bad idea since the state will be
// expecting the next block of data, not the same block.
class IVoiceCodec
{
protected:
virtual ~IVoiceCodec() {}
public:
// Initialize the object. The uncompressed format is always 16-bit signed mono.
virtual bool Init( int quality )=0;
// Use this to delete the object.
virtual void Release()=0;
// Compress the voice data.
// pUncompressed - 16-bit signed mono voice data.
// maxCompressedBytes - The length of the pCompressed buffer. Don't exceed this.
// bFinal - Set to true on the last call to Compress (the user stopped talking).
// Some codecs like big block sizes and will hang onto data you give them in Compress calls.
// When you call with bFinal, the codec will give you compressed data no matter what.
// Return the number of bytes you filled into pCompressed.
virtual int Compress(const char *pUncompressed, int nSamples, char *pCompressed, int maxCompressedBytes, bool bFinal)=0;
// Decompress voice data. pUncompressed is 16-bit signed mono.
virtual int Decompress(const char *pCompressed, int compressedBytes, char *pUncompressed, int maxUncompressedBytes)=0;
// Some codecs maintain state between Compress and Decompress calls. This should clear that state.
virtual bool ResetState()=0;
};
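// Usage sketch (illustrative only, not part of this header): compressing one block of
// consecutive 16-bit mono samples. The caller owns pCodec and both buffers; bLastChunk
// should be true on the final call so the codec flushes any buffered data (see bFinal above).
inline int Example_CompressChunk( IVoiceCodec *pCodec, const short *pSamples, int nSamples, char *pCompressed, int nMaxCompressedBytes, bool bLastChunk )
{
	// Compress takes the uncompressed 16-bit data through a char pointer.
	return pCodec->Compress( (const char *)pSamples, nSamples, pCompressed, nMaxCompressedBytes, bLastChunk );
}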
#endif // IVOICECODEC_H


@@ -0,0 +1,40 @@
//========= Copyright © 1996-2005, Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//=============================================================================//
#ifndef IVOICERECORD_H
#define IVOICERECORD_H
#pragma once
// This is the voice recording interface. It provides 16-bit signed mono data from
// a mic at some sample rate.
abstract_class IVoiceRecord
{
protected:
virtual ~IVoiceRecord() {}
public:
// Use this to delete the object.
virtual void Release()=0;
// Start/stop capturing.
virtual bool RecordStart() = 0;
virtual void RecordStop() = 0;
// Idle processing.
virtual void Idle()=0;
// Get the most recent N samples. If nSamplesWanted is less than the number of
// available samples, it discards the first samples and gives you the last ones.
virtual int GetRecordedData(short *pOut, int nSamplesWanted)=0;
};
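// Usage sketch (illustrative only, not part of this header): a per-frame polling loop that
// pulls the most recent samples from the mic. pRecord and pBuffer are supplied by the caller.
inline int Example_PollMicrophone( IVoiceRecord *pRecord, short *pBuffer, int nMaxSamples )
{
	pRecord->Idle();					// let the device do its per-frame work
	return pRecord->GetRecordedData( pBuffer, nMaxSamples );
}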
#endif // IVOICERECORD_H


@@ -0,0 +1,263 @@
//========= Copyright © 1996-2005, Valve Corporation, All rights reserved. ============//
//
// Purpose: This abstracts the various hardware-dependent implementations of sound output.
// At the time of this writing there are Windows WAVEOUT, Direct Sound,
// and Null implementations.
//
//=====================================================================================//
#ifndef SOUNDSYSTEM_LOWLEVEL_H
#define SOUNDSYSTEM_LOWLEVEL_H
#pragma once
#include "utlvector.h"
#define SOUND_DEVICE_MAX_CHANNELS 8 // we support 2, 4, 6, & 8 channels currently.
// Long term we may build 4 & 8 as matrix mix-downs of 6 channels
#define MIX_BUFFER_SIZE 512
class IAudioDevice2;
struct audio_device_init_params_t;
class ALIGN16 CAudioMixBuffer
{
public:
float m_flData[MIX_BUFFER_SIZE];
} ALIGN16_POST;
const float MIX_DEFAULT_SAMPLING_RATE = 44100.0f;
const float SECONDS_PER_SAMPLE = (1.0f / MIX_DEFAULT_SAMPLING_RATE);
const float MIX_SECONDS_PER_BUFFER = float( MIX_BUFFER_SIZE ) / float( MIX_DEFAULT_SAMPLING_RATE );
const float MIX_BUFFERS_PER_SECOND = float( MIX_DEFAULT_SAMPLING_RATE ) / float( MIX_BUFFER_SIZE );
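// For reference (derived from the constants above, not part of the original header):
// one mix buffer covers 512 / 44100 ~= 11.6ms of audio, i.e. roughly 86 buffers per second.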
enum eSubSystems_t
{
AUDIO_SUBSYSTEM_XAUDIO = 0,
AUDIO_SUBSYSTEM_DSOUND = 1,
AUDIO_SUBSYSTEM_SDL = 2,
AUDIO_SUBSYSTEM_NULL = 3, // fake, emulated device for failure cases
};
#define AUDIO_DEVICE_NAME_MAX 256
struct audio_device_description_t
{
wchar_t m_deviceName[AUDIO_DEVICE_NAME_MAX];
char m_friendlyName[AUDIO_DEVICE_NAME_MAX];
uint8 m_nSubsystemId;
uint8 m_nChannelCount;
bool m_bIsDefault : 1;
bool m_bIsAvailable : 1;
audio_device_description_t() {}
explicit audio_device_description_t( eSubSystems_t nSubsystem ) : m_nSubsystemId( (uint8)nSubsystem ) { Assert( nSubsystem >= 0 && nSubsystem <= UINT8_MAX ); }
inline void InitAsNullDevice()
{
V_memset( m_deviceName, 0, sizeof(m_deviceName) );
V_memset( m_friendlyName, 0, sizeof(m_friendlyName) );
m_nChannelCount = 2;
m_nSubsystemId = AUDIO_SUBSYSTEM_NULL;
m_bIsDefault = true;
m_bIsAvailable = true;
}
};
class CAudioDeviceList
{
public:
eSubSystems_t m_nSubsystem;
CUtlVector<audio_device_description_t> m_list;
int m_nDefaultDevice;
CAudioDeviceList() {}
void BuildDeviceList( eSubSystems_t nPreferredSubsystem );
bool UpdateDeviceList(); // returns true if new devices or defaults show up
audio_device_description_t *FindDeviceById( const char *pId ); // returns NULL if not found
audio_device_description_t *GetDefaultDevice(); // returns NULL if not set
bool IsValid() { return m_list.Count() > 0; }
IAudioDevice2 *CreateDevice( audio_device_init_params_t &params );
const wchar_t *GetDeviceToCreate( audio_device_init_params_t &params );
private:
uint m_nDeviceStamp;
void UpdateDefaultDevice();
enum finddevice_t
{
FIND_ANY_DEVICE = 0,
FIND_AVAILABLE_DEVICE_ONLY = 1,
};
int FindDeviceById( const wchar_t *pId, finddevice_t nFind );
};
#define DEFAULT_MIX_BUFFER_COUNT 4
#define DEFAULT_MIX_BUFFER_SAMPLE_COUNT MIX_BUFFER_SIZE
struct audio_device_init_params_t
{
const audio_device_description_t *m_pDesc;
void *m_pWindowHandle;
int m_nOutputBufferCount;
int m_nSampleCountPerOutputBuffer;
int m_nOverrideSpeakerConfig; // only used if m_bOverrideSpeakerConfig is true
bool m_bOverrideDevice; // If this is set use m_overrideDevice
bool m_bOverrideSpeakerConfig;
bool m_bPlayEvenWhenNotInFocus;
// When we set the override device it is important to copy the memory since
// the original device description may get realloced and thus become a stale
// pointer.
wchar_t m_overrideDeviceName[AUDIO_DEVICE_NAME_MAX];
int m_nOverrideSubsystem;
inline void OverrideDevice( audio_device_description_t *pDevice )
{
m_bOverrideDevice = true;
V_wcscpy_safe( m_overrideDeviceName, pDevice->m_deviceName );
m_nOverrideSubsystem = pDevice->m_nSubsystemId;
}
inline void OverrideSpeakerConfig( int nSpeakerConfig )
{
Assert(nSpeakerConfig >= 0 && nSpeakerConfig < 8);
m_nOverrideSpeakerConfig = nSpeakerConfig;
m_bOverrideSpeakerConfig = true;
}
audio_device_init_params_t() : m_bOverrideSpeakerConfig(false), m_bOverrideDevice(false) {}
inline void Defaults()
{
m_nOutputBufferCount = DEFAULT_MIX_BUFFER_COUNT;
m_nSampleCountPerOutputBuffer = MIX_BUFFER_SIZE;
m_bOverrideDevice = false;
m_bOverrideSpeakerConfig = false;
m_nOverrideSpeakerConfig = 0;
m_bPlayEvenWhenNotInFocus = true;
m_pWindowHandle = NULL;
}
};
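// Usage sketch (illustrative only, not part of the original header): enumerating devices on
// the preferred subsystem and opening the default one with default buffer settings.
inline IAudioDevice2 *Example_OpenDefaultDevice( CAudioDeviceList &deviceList, eSubSystems_t nSubsystem )
{
	deviceList.BuildDeviceList( nSubsystem );
	if ( !deviceList.IsValid() )
		return NULL;
	audio_device_init_params_t params;
	params.Defaults();
	if ( audio_device_description_t *pDefault = deviceList.GetDefaultDevice() )
	{
		// copy the name so we don't hold a pointer into the (reallocatable) device list
		params.OverrideDevice( pDefault );
	}
	return deviceList.CreateDevice( params );
}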
extern int Audio_EnumerateDevices( eSubSystems_t nSubsystem, audio_device_description_t *pDeviceListOut, int nListCount );
extern int Audio_EnumerateXAudio2Devices( audio_device_description_t *pDeviceListOut, int nListCount );
extern int Audio_EnumerateDSoundDevices( audio_device_description_t *pDeviceListOut, int nListCount );
#ifdef POSIX
extern int Audio_EnumerateSDLDevices( audio_device_description_t *pDeviceListOut, int nListCount );
#endif
// return true if there was an error event and the device needs to be restarted
extern bool Audio_PollErrorEvents();
class IAudioDevice2
{
public:
virtual ~IAudioDevice2() {}
virtual void OutputBuffer( int nChannels, CAudioMixBuffer *pChannelArray ) = 0;
virtual void Shutdown( void ) = 0;
virtual int QueuedBufferCount() = 0;
virtual int EmptyBufferCount() = 0;
virtual void CancelOutput( void ) = 0;
virtual void WaitForComplete() = 0;
virtual void UpdateFocus( bool bWindowHasFocus ) = 0;
virtual void ClearBuffer() = 0;
virtual const wchar_t *GetDeviceID() const = 0;
virtual void OutputDebugInfo() const = 0;
virtual bool SetShouldPlayWhenNotInFocus( bool bPlayEvenWhenNotInFocus ) = 0;
inline const char *Name() const { return m_pName; }
inline int ChannelCount() const { return m_nChannels; }
inline int MixChannelCount() const { return m_nChannels > 6 ? 6 : m_nChannels; } // 7.1 mixes as 5.1
inline int BitsPerSample() const { return m_nSampleBits; }
inline int SampleRate() const { return m_nSampleRate; }
inline bool IsSurround() const { return m_nChannels > 2 ? true : false; }
inline bool IsSurroundCenter() const { return m_nChannels > 4 ? true : false; }
inline bool IsActive() const { return m_bIsActive; }
inline bool IsHeadphone() const { return m_bIsHeadphone; } // mixing makes some choices differently for stereo vs headphones, expose that here.
inline bool CanDetectBufferStarvation() { return m_bSupportsBufferStarvationDetection; }
inline bool IsCaptureDevice() { return m_bIsCaptureDevice; }
inline int DeviceSampleBytes( void ) const { return BitsPerSample() / 8; }
// UNDONE: Need to implement these
void Pause() {}
void UnPause() {}
void TransferSamples( uint32 nEndTimeIgnored );
protected:
// NOTE: Derived classes MUST initialize these before returning a device from a factory
const char *m_pName;
int m_nChannels;
int m_nSampleBits;
int m_nSampleRate;
bool m_bIsActive;
bool m_bIsHeadphone;
bool m_bSupportsBufferStarvationDetection;
bool m_bIsCaptureDevice;
};
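// Usage sketch (illustrative only, not part of the original header): submitting one mixed
// batch of MIX_BUFFER_SIZE samples per channel when the device has a free output buffer.
// pChannels is assumed to hold MixChannelCount() mix buffers filled by the mixer.
inline void Example_SubmitMixedBatch( IAudioDevice2 *pDevice, CAudioMixBuffer *pChannels )
{
	if ( pDevice->EmptyBufferCount() > 0 )
	{
		pDevice->OutputBuffer( pDevice->MixChannelCount(), pChannels );
	}
}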
// device handling
extern IAudioDevice2 *Audio_CreateXAudio2Device( const audio_device_init_params_t &params );
extern IAudioDevice2 *Audio_CreateDSoundDevice( const audio_device_init_params_t &params );
#ifdef POSIX
extern IAudioDevice2 *Audio_CreateSDLDevice( const audio_device_init_params_t &params );
#endif
extern IAudioDevice2 *Audio_CreateNullDevice();
#if IS_WINDOWS_PC
extern bool GetWindowsDefaultAudioDevice( wchar_t *pName, size_t nNameBufSize );
#endif
// speaker config
extern int SpeakerConfigValueToChannelCount( int nSpeakerConfig );
extern int ChannelCountToSpeakerConfigValue( int nChannelCount, bool bIsHeadphone );
// buffer library
extern void ScaleBuffer( float flOutput[MIX_BUFFER_SIZE], const float flInput[MIX_BUFFER_SIZE], float flScale );
extern void ScaleBufferRamp( float flOutput[MIX_BUFFER_SIZE], const float flInput[MIX_BUFFER_SIZE], float flScaleStart, float flScaleEnd );
extern void MixBuffer( float flOutput[MIX_BUFFER_SIZE], const float flInput[MIX_BUFFER_SIZE], float flScale );
extern void MixBufferRamp( float flOutput[MIX_BUFFER_SIZE], const float flInput[MIX_BUFFER_SIZE], float flScaleStart, float flScaleEnd );
inline void ScaleBufferAuto( float flOutput[MIX_BUFFER_SIZE], const float flInput[MIX_BUFFER_SIZE], float flScaleStart, float flScaleEnd )
{
if ( flScaleStart == flScaleEnd )
{
ScaleBuffer( flOutput, flInput, flScaleEnd );
}
else
{
ScaleBufferRamp( flOutput, flInput, flScaleStart, flScaleEnd );
}
}
inline void MixBufferAuto( float flOutput[MIX_BUFFER_SIZE], const float flInput[MIX_BUFFER_SIZE], float flScaleStart, float flScaleEnd )
{
if ( flScaleStart == flScaleEnd )
{
MixBuffer( flOutput, flInput, flScaleEnd );
}
else
{
MixBufferRamp( flOutput, flInput, flScaleStart, flScaleEnd );
}
}
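// Reference sketch only (not the engine's implementation, which is declared above and is
// presumably optimized): this assumes MixBufferRamp accumulates the input scaled by a
// per-sample linear interpolation from flScaleStart to flScaleEnd across the buffer.
inline void MixBufferRamp_Reference( float flOutput[MIX_BUFFER_SIZE], const float flInput[MIX_BUFFER_SIZE], float flScaleStart, float flScaleEnd )
{
	float flScale = flScaleStart;
	const float flStep = ( flScaleEnd - flScaleStart ) / float( MIX_BUFFER_SIZE );
	for ( int i = 0; i < MIX_BUFFER_SIZE; i++ )
	{
		flOutput[i] += flInput[i] * flScale;	// accumulate the scaled sample
		flScale += flStep;
	}
}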
extern void SilenceBuffer( float flBuffer[MIX_BUFFER_SIZE] );
extern void SilenceBuffers( CAudioMixBuffer *pBuffers, int nBufferCount );
extern void SumBuffer2x1( float flOutput[MIX_BUFFER_SIZE], float flInput0[MIX_BUFFER_SIZE], float flScale0, float flInput1[MIX_BUFFER_SIZE], float flScale1 );
extern void SwapBuffersInPlace( float flInput0[MIX_BUFFER_SIZE], float flInput1[MIX_BUFFER_SIZE] );
extern float BufferLevel( float flInput[MIX_BUFFER_SIZE] );
extern float AvergeBufferAmplitude( float flInput[MIX_BUFFER_SIZE] );
extern void ConvertFloat32Int16_Clamp_Interleave2( short *pOut, float *pflLeft, float *pflRight, int nSampleCount );
extern void ConvertFloat32Int16_Clamp_InterleaveStride( short *pOut, int nOutputChannelCount, int nChannelStrideFloats, float *pflChannel0, int nInputChannelCount, int nSampleCount );
#if IS_WINDOWS_PC
void InitCOM();
void ShutdownCOM();
#else
inline void InitCOM() {}
inline void ShutdownCOM() {}
#endif
#endif // SOUNDSYSTEM_LOWLEVEL_H


@@ -0,0 +1,409 @@
//====== Copyright 1996-2005, Valve Corporation, All rights reserved. =======
//
// Purpose:
//
//=============================================================================
#include "winlite.h"
#include "mpafile.h"
#include "soundchars.h"
#include "tier1/utlrbtree.h"
// NOTE: This has to be the last file included!
#include "tier0/memdbgon.h"
// exception class
CMPAException::CMPAException(ErrorIDs ErrorID, const char *szFile, const char *szFunction, bool bGetLastError ) :
m_ErrorID( ErrorID ), m_bGetLastError( bGetLastError )
{
m_szFile = strdup(szFile);
m_szFunction = strdup(szFunction);
}
// copy constructor (necessary for exception throwing without pointers)
CMPAException::CMPAException(const CMPAException& Source)
{
m_ErrorID = Source.m_ErrorID;
m_bGetLastError = Source.m_bGetLastError;
m_szFile = strdup(Source.m_szFile);
m_szFunction = strdup(Source.m_szFunction);
}
// destructor
CMPAException::~CMPAException()
{
if( m_szFile )
free( (void*)m_szFile );
if( m_szFunction )
free( (void*)m_szFunction );
}
// should be in a resource file for multi-language applications
const char *m_szErrors[] =
{
"Can't open the file.",
"Can't set file position.",
"Can't read from file.",
"Reached end of buffer.",
"No VBR Header found.",
"Incomplete VBR Header.",
"No subsequent frame found within tolerance range.",
"No frame found."
};
#define MAX_ERR_LENGTH 256
void CMPAException::ShowError()
{
char szErrorMsg[MAX_ERR_LENGTH] = {0};
char szHelp[MAX_ERR_LENGTH];
// this is not buffer-overflow-proof!
if( m_szFunction )
{
sprintf( szHelp, _T("%s: "), m_szFunction );
strcat( szErrorMsg, szHelp );
}
if( m_szFile )
{
sprintf( szHelp, _T("'%s'\n"), m_szFile );
strcat( szErrorMsg, szHelp );
}
strcat( szErrorMsg, m_szErrors[m_ErrorID] );
#if defined(WIN32) && !defined(_X360)
if( m_bGetLastError )
{
// get error message of last system error id
LPVOID pMsgBuf;
if ( FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
GetLastError(),
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
(LPTSTR) &pMsgBuf,
0,
NULL ))
{
strcat( szErrorMsg, "\n" );
strcat( szErrorMsg, (const char *)pMsgBuf );
LocalFree( pMsgBuf );
}
}
#endif
// show error message
Warning( "%s\n", szErrorMsg );
}
// 1 KB is the initial buffer size; each time the buffer needs to grow, it is doubled
const uint32 CMPAFile::m_dwInitBufferSize = 1024;
CMPAFile::CMPAFile( const char * szFile, uint32 dwFileOffset, FileHandle_t hFile ) :
m_pBuffer(NULL), m_dwBufferSize(0), m_dwBegin( dwFileOffset ), m_dwEnd(0),
m_dwNumTimesRead(0), m_bVBRFile( false ), m_pVBRHeader(NULL), m_bMustReleaseFile( false ),
m_pMPAHeader(NULL), m_hFile( hFile ), m_szFile(NULL), m_dwFrameNo(1)
{
// open file, if not already done
if( m_hFile == FILESYSTEM_INVALID_HANDLE )
{
Open( szFile );
m_bMustReleaseFile = true;
}
// save filename
m_szFile = strdup( szFile );
// set end of MPEG data (assume file end)
if( m_dwEnd <= 0 )
{
// get file size
m_dwEnd = g_pFullFileSystem->Size( m_hFile );
}
// find first valid MPEG frame
m_pMPAHeader = new CMPAHeader( this );
// is VBR header available?
CVBRHeader::VBRHeaderType HeaderType = CVBRHeader::NoHeader;
uint32 dwOffset = m_pMPAHeader->m_dwSyncOffset;
if( CVBRHeader::IsVBRHeaderAvailable( this, HeaderType, dwOffset ) )
{
try
{
// read out VBR header
m_pVBRHeader = new CVBRHeader( this, HeaderType, dwOffset );
m_bVBRFile = true;
m_dwBytesPerSec = m_pVBRHeader->m_dwBytesPerSec;
if( m_pVBRHeader->m_dwBytes > 0 )
m_dwEnd = m_dwBegin + m_pVBRHeader->m_dwBytes;
}
catch(CMPAException& Exc)
{
Exc.ShowError();
}
}
if( !m_pVBRHeader )
{
// always skip empty (32kBit) frames
m_bVBRFile = m_pMPAHeader->SkipEmptyFrames();
m_dwBytesPerSec = m_pMPAHeader->GetBytesPerSecond();
}
}
bool CMPAFile::GetNextFrame()
{
uint32 dwOffset = m_pMPAHeader->m_dwSyncOffset + m_pMPAHeader->m_dwRealFrameSize;
try
{
CMPAHeader* pFrame = new CMPAHeader( this, dwOffset, false );
delete m_pMPAHeader;
m_pMPAHeader = pFrame;
if( m_dwFrameNo > 0 )
m_dwFrameNo++;
}
catch(...)
{
return false;
}
return true;
}
bool CMPAFile::GetPrevFrame()
{
uint32 dwOffset = m_pMPAHeader->m_dwSyncOffset-MPA_HEADER_SIZE;
try
{
// look backward from dwOffset on
CMPAHeader* pFrame = new CMPAHeader( this, dwOffset, false, true );
delete m_pMPAHeader;
m_pMPAHeader = pFrame;
if( m_dwFrameNo > 0 )
m_dwFrameNo --;
}
catch(...)
{
return false;
}
return true;
}
bool CMPAFile::GetFirstFrame()
{
uint32 dwOffset = 0;
try
{
CMPAHeader* pFrame = new CMPAHeader( this, dwOffset, false );
delete m_pMPAHeader;
m_pMPAHeader = pFrame;
m_dwFrameNo = 1;
}
catch(...)
{
return false;
}
return true;
}
bool CMPAFile::GetLastFrame()
{
uint32 dwOffset = m_dwEnd - m_dwBegin - MPA_HEADER_SIZE;
try
{
// look backward from dwOffset on
CMPAHeader* pFrame = new CMPAHeader( this, dwOffset, false, true );
delete m_pMPAHeader;
m_pMPAHeader = pFrame;
m_dwFrameNo = 0;
}
catch(...)
{
return false;
}
return true;
}
// destructor
CMPAFile::~CMPAFile(void)
{
delete m_pMPAHeader;
if( m_pVBRHeader )
delete m_pVBRHeader;
if( m_pBuffer )
delete[] m_pBuffer;
// close file
if( m_bMustReleaseFile )
g_pFullFileSystem->Close( m_hFile );
if( m_szFile )
free( (void*)m_szFile );
}
// open file
void CMPAFile::Open( const char * szFilename )
{
// open with CreateFile (no 128-byte filename length limitation as with mmioOpen)
m_hFile = g_pFullFileSystem->Open( szFilename, "rb", "GAME" );//::CreateFile( szFilename, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL );
if( m_hFile == FILESYSTEM_INVALID_HANDLE )
{
// throw error
throw CMPAException( CMPAException::ErrOpenFile, szFilename, _T("CreateFile"), true );
}
}
// set file position
void CMPAFile::SetPosition( int offset )
{
/*
LARGE_INTEGER liOff;
liOff.QuadPart = lOffset;
liOff.LowPart = ::SetFilePointer(m_hFile, liOff.LowPart, &liOff.HighPart, dwMoveMethod );
if (liOff.LowPart == INVALID_SET_FILE_POINTER && GetLastError() != NO_ERROR )
{
// throw error
throw CMPAException( CMPAException::ErrSetPosition, m_szFile, _T("SetFilePointer"), true );
}
*/
g_pFullFileSystem->Seek( m_hFile, offset, FILESYSTEM_SEEK_HEAD );
}
// read from file, return number of bytes read
uint32 CMPAFile::Read( void *pData, uint32 dwSize, uint32 dwOffset )
{
uint32 dwBytesRead = 0;
// set position first
SetPosition( m_dwBegin+dwOffset );
//if( !::ReadFile( m_hFile, pData, dwSize, &dwBytesRead, NULL ) )
// throw CMPAException( CMPAException::ErrReadFile, m_szFile, _T("ReadFile"), true );
dwBytesRead = g_pFullFileSystem->Read( pData, dwSize, m_hFile );
return dwBytesRead;
}
// convert from big endian to native format (Intel=little endian) and return as uint32 (32bit)
uint32 CMPAFile::ExtractBytes( uint32& dwOffset, uint32 dwNumBytes, bool bMoveOffset )
{
Assert( dwNumBytes > 0 );
Assert( dwNumBytes <= 4 ); // max 4 byte
// enough bytes in buffer, otherwise read from file
if( !m_pBuffer || ( ((int)(m_dwBufferSize - dwOffset)) < (int)dwNumBytes) )
FillBuffer( dwOffset + dwNumBytes );
uint32 dwResult = 0;
// big endian extract (most significant byte first) (will work on little and big-endian computers)
uint32 dwNumByteShifts = dwNumBytes - 1;
for( uint32 n=dwOffset; n < dwOffset+dwNumBytes; n++ )
{
dwResult |= m_pBuffer[n] << 8*dwNumByteShifts--; // the bit shift will do the correct byte order for you
}
if( bMoveOffset )
dwOffset += dwNumBytes;
return dwResult;
}
// throws exception if not possible
void CMPAFile::FillBuffer( uint32 dwOffsetToRead )
{
uint32 dwNewBufferSize;
// calc new buffer size
if( m_dwBufferSize == 0 )
dwNewBufferSize = m_dwInitBufferSize;
else
dwNewBufferSize = m_dwBufferSize*2;
// is it big enough?
if( dwNewBufferSize < dwOffsetToRead )
dwNewBufferSize = dwOffsetToRead;
// reserve new buffer
BYTE* pNewBuffer = new BYTE[dwNewBufferSize];
// take over data from old buffer
if( m_pBuffer )
{
memcpy( pNewBuffer, m_pBuffer, m_dwBufferSize );
// release old buffer
delete[] m_pBuffer;
}
m_pBuffer = (char*)pNewBuffer;
// read <dwNewBufferSize-m_dwBufferSize> bytes from offset <m_dwBufferSize>
uint32 dwBytesRead = Read( m_pBuffer+m_dwBufferSize, dwNewBufferSize-m_dwBufferSize, m_dwBufferSize );
// the buffer only grows by the number of bytes actually read from the file
m_dwBufferSize += dwBytesRead;
}
// Uses mp3 code from: http://www.codeproject.com/audio/MPEGAudioInfo.asp
struct MP3Duration_t
{
FileNameHandle_t h;
float duration;
static bool LessFunc( const MP3Duration_t& lhs, const MP3Duration_t& rhs )
{
return lhs.h < rhs.h;
}
};
CUtlRBTree< MP3Duration_t, int > g_MP3Durations( 0, 0, MP3Duration_t::LessFunc );
float GetMP3Duration_Helper( char const *filename )
{
float duration = 60.0f;
// See if it's in the RB tree already...
char fn[ 512 ];
V_snprintf( fn, sizeof( fn ), "sound/%s", PSkipSoundChars( filename ) );
FileNameHandle_t h = g_pFullFileSystem->FindOrAddFileName( fn );
MP3Duration_t search;
search.h = h;
int idx = g_MP3Durations.Find( search );
if ( idx != g_MP3Durations.InvalidIndex() )
{
return g_MP3Durations[ idx ].duration;
}
try
{
CMPAFile MPAFile( fn, 0 );
if ( MPAFile.m_dwBytesPerSec != 0 )
{
duration = (float)(MPAFile.m_dwEnd - MPAFile.m_dwBegin) / (float)MPAFile.m_dwBytesPerSec;
}
}
catch ( ... )
{
}
search.duration = duration;
g_MP3Durations.Insert( search );
return duration;
}


@@ -0,0 +1,125 @@
//====== Copyright 1996-2005, Valve Corporation, All rights reserved. =======
//
// Purpose: Uses mp3 code from: http://www.codeproject.com/audio/MPEGAudioInfo.asp
//
// There don't appear to be any licensing restrictions for using this code:
//
/*
- Readme - MPEG Audio Info Tool V2.0 - 2004-11-01
Description:
This tool can display information about MPEG audio files. It supports
MPEG1, MPEG2, MPEG2.5 in all three layers. You can get all the fields
from the MPEG audio frame in each frame of the file. Additionally you
can check the whole file for inconsistencies.
This tool was written as an example on how to use the classes:
CMPAFile, CMPAHeader, CVBRHeader and CMPAException.
The article MPEG Audio Frame Header on Code Project
[http://www.codeproject.com/audio/MPEGAudioInfo.asp]
provides additional information about these classes and the frame header
in general.
This tool was written with MS Visual C++ 7.1. The MFC library is
statically linked.
*/
//=============================================================================
#ifndef MPAFILE_H
#define MPAFILE_H
#ifdef _WIN32
#pragma once
#endif
#pragma once
#include "vbrheader.h"
#include "mpaheader.h"
#include "filesystem.h"
// exception class
class CMPAException
{
public:
enum ErrorIDs
{
ErrOpenFile,
ErrSetPosition,
ErrReadFile,
EndOfBuffer,
NoVBRHeader,
IncompleteVBRHeader,
NoFrameInTolerance,
NoFrame
};
CMPAException( ErrorIDs ErrorID, const char *szFile, const char *szFunction = NULL, bool bGetLastError=false );
// copy constructor (necessary because of LPSTR members)
CMPAException(const CMPAException& Source);
~CMPAException(void);
ErrorIDs GetErrorID() { return m_ErrorID; }
void ShowError();
private:
ErrorIDs m_ErrorID;
bool m_bGetLastError;
const char *m_szFunction;
const char *m_szFile;
};
class CMPAFile
{
public:
CMPAFile( const char *szFile, uint32 dwFileOffset, FileHandle_t hFile = FILESYSTEM_INVALID_HANDLE );
~CMPAFile(void);
uint32 ExtractBytes( uint32 &dwOffset, uint32 dwNumBytes, bool bMoveOffset = true );
const char *GetFilename() const { return m_szFile; };
bool GetNextFrame();
bool GetPrevFrame();
bool GetFirstFrame();
bool GetLastFrame();
private:
static const uint32 m_dwInitBufferSize;
// methods for file access
void Open( const char *szFilename );
void SetPosition( int offset );
uint32 Read( void *pData, uint32 dwSize, uint32 dwOffset );
void FillBuffer( uint32 dwOffsetToRead );
static uint32 m_dwBufferSizes[MAXTIMESREAD];
// concerning file itself
FileHandle_t m_hFile;
const char *m_szFile;
bool m_bMustReleaseFile;
public:
uint32 m_dwBegin; // offset of first MPEG Audio frame
uint32 m_dwEnd; // offset of last MPEG Audio frame (estimated)
bool m_bVBRFile;
uint32 m_dwBytesPerSec;
CMPAHeader* m_pMPAHeader;
uint32 m_dwFrameNo;
CVBRHeader* m_pVBRHeader; // XING or VBRI
// concerning read-buffer
uint32 m_dwNumTimesRead;
char *m_pBuffer;
uint32 m_dwBufferSize;
};
#endif // MPAFILE_H


@@ -0,0 +1,336 @@
//====== Copyright 1996-2005, Valve Corporation, All rights reserved. =======
//
// Purpose:
//
//=============================================================================
#if defined( WIN32) && !defined( _X360 )
#include "winlite.h"
#endif
#include "tier0/platform.h"
#include "mpafile.h"
// NOTE: This has to be the last file included!
#include "tier0/memdbgon.h"
// static variables
const char *CMPAHeader::m_szLayers[] = { "Layer I", "Layer II", "Layer III" };
const char *CMPAHeader::m_szMPEGVersions[] = {"MPEG 2.5", "", "MPEG 2", "MPEG 1" };
const char *CMPAHeader::m_szChannelModes[] = { "Stereo", "Joint Stereo", "Dual Channel", "Single Channel" };
const char *CMPAHeader::m_szEmphasis[] = { "None", "50/15ms", "", "CCIT J.17" };
// tolerance range, look at expected offset +/- m_dwTolerance for subsequent frames
const uint32 CMPAHeader::m_dwTolerance = 3; // 3 bytes
// max. range where to look for frame sync
const uint32 CMPAHeader::m_dwMaxRange = ( 256 * 1024 );
// sampling rates in hertz: 1. index = MPEG Version ID, 2. index = sampling rate index
const uint32 CMPAHeader::m_dwSamplingRates[4][3] =
{
{11025, 12000, 8000, }, // MPEG 2.5
{0, 0, 0, }, // reserved
{22050, 24000, 16000, }, // MPEG 2
{44100, 48000, 32000 } // MPEG 1
};
// padding sizes in bytes for different layers: 1. index = layer
const uint32 CMPAHeader::m_dwPaddingSizes[3] =
{
4, // Layer1
1, // Layer2
1 // Layer3
};
// bitrates: 1. index = LSF, 2. index = Layer, 3. index = bitrate index
const uint32 CMPAHeader::m_dwBitrates[2][3][15] =
{
{ // MPEG 1
{0,32,64,96,128,160,192,224,256,288,320,352,384,416,448,}, // Layer1
{0,32,48,56, 64, 80, 96,112,128,160,192,224,256,320,384,}, // Layer2
{0,32,40,48, 56, 64, 80, 96,112,128,160,192,224,256,320,} // Layer3
},
{ // MPEG 2, 2.5
{0,32,48,56,64,80,96,112,128,144,160,176,192,224,256,}, // Layer1
{0,8,16,24,32,40,48,56,64,80,96,112,128,144,160,}, // Layer2
{0,8,16,24,32,40,48,56,64,80,96,112,128,144,160,} // Layer3
}
};
// Samples per Frame: 1. index = LSF, 2. index = Layer
const uint32 CMPAHeader::m_dwSamplesPerFrames[2][3] =
{
{ // MPEG 1
384, // Layer1
1152, // Layer2
1152 // Layer3
},
{ // MPEG 2, 2.5
384, // Layer1
1152, // Layer2
576 // Layer3
}
};
// Samples per Frame / 8
const uint32 CMPAHeader::m_dwCoefficients[2][3] =
{
{ // MPEG 1
48, // Layer1
144, // Layer2
144 // Layer3
},
{ // MPEG 2, 2.5
48, // Layer1
144, // Layer2
72 // Layer3
}
};
// needed later for CRC check
// sideinformation size: 1.index = lsf, 2. index = layer, 3. index = mono
const uint32 CMPAHeader::m_dwSideinfoSizes[2][3][2] =
{
{ // MPEG 1 (not mono, mono)
{0,0}, // Layer1
{0,0}, // Layer2
{9,17} // Layer3
},
{ // MPEG 2, 2.5
{0,0}, // Layer1
{0,0}, // Layer2
{17,32} // Layer3
}
};
// constructor (throws exception if no frame found)
CMPAHeader::CMPAHeader( CMPAFile* pMPAFile, uint32 dwExpectedOffset, bool bSubsequentFrame, bool bReverse ) :
m_pMPAFile( pMPAFile ), m_dwSyncOffset( dwExpectedOffset ), m_dwRealFrameSize( 0 )
{
// first check at expected offset (extended for not subsequent frames)
HeaderError error = IsSync( m_dwSyncOffset, !bSubsequentFrame );
int nStep=1;
int nSyncOffset;
while( error != noError )
{
// either look in tolerance range
if( bSubsequentFrame )
{
if( nStep > m_dwTolerance )
{
// out of tolerance range
throw CMPAException( CMPAException::NoFrameInTolerance, pMPAFile->GetFilename() );
}
// look around dwExpectedOffset with increasing steps (+1,-1,+2,-2,...)
if( m_dwSyncOffset <= dwExpectedOffset )
{
nSyncOffset = dwExpectedOffset + nStep;
}
else
{
nSyncOffset = dwExpectedOffset - nStep++;
}
}
// just go forward/backward to find sync
else
{
nSyncOffset = ((int)m_dwSyncOffset) + (bReverse?-1:+1);
}
// is new offset within valid range?
if( nSyncOffset < 0 || nSyncOffset > (int)((pMPAFile->m_dwEnd - pMPAFile->m_dwBegin) - MPA_HEADER_SIZE) || abs( (long)(nSyncOffset-dwExpectedOffset) ) > m_dwMaxRange )
{
// out of tolerance range
throw CMPAException( CMPAException::NoFrame, pMPAFile->GetFilename() );
}
m_dwSyncOffset = nSyncOffset;
// found sync?
error = IsSync( m_dwSyncOffset, !bSubsequentFrame );
}
}
// destructor
CMPAHeader::~CMPAHeader()
{
}
// skips first 32kbit/s or lower bitrate frames to estimate bitrate (returns true if bitrate is variable)
bool CMPAHeader::SkipEmptyFrames()
{
if( m_dwBitrate > 32 )
return false;
uint32 dwHeader;
try
{
while( m_dwBitrate <= 32 )
{
m_dwSyncOffset += m_dwComputedFrameSize + MPA_HEADER_SIZE;
dwHeader = m_pMPAFile->ExtractBytes( m_dwSyncOffset, MPA_HEADER_SIZE, false );
if( IsSync( dwHeader, false ) != noError )
return false;
}
}
catch(CMPAException& /*Exc*/) // just catch the exception and return false
{
return false;
}
return true;
}
// dwHeader contains the 32-bit header in big-endian byte order, so the frame sync sits in the most significant bits;
// the value is kept in a uint32 because bit shifts only work on integral types.
CMPAHeader::HeaderError CMPAHeader::DecodeHeader( uint32 dwHeader, bool bSimpleDecode )
{
// Check SYNC bits (last eleven bits set)
if( (dwHeader >> 24 != 0xff) || ((((dwHeader >> 16))&0xe0) != 0xe0) )
return noSync;
// get MPEG version
m_Version = (MPAVersion)((dwHeader >> 19) & 0x03); // mask only the rightmost 2 bits
if( m_Version == MPEGReserved )
return headerCorrupt;
if( m_Version == MPEG1 )
m_bLSF = false;
else
m_bLSF = true;
// get layer (0 = layer1, 2 = layer2, ...)
m_Layer = (MPALayer)(3 - ((dwHeader >> 17) & 0x03));
if( m_Layer == LayerReserved )
return headerCorrupt;
// protection bit (inverted)
m_bCRC = !((dwHeader >> 16) & 0x01);
// bitrate
BYTE bIndex = (BYTE)((dwHeader >> 12) & 0x0F);
if( bIndex == 0x0F ) // all bits set is reserved
return headerCorrupt;
m_dwBitrate = m_dwBitrates[m_bLSF][m_Layer][bIndex] * 1000; // convert from kbit to bit
if( m_dwBitrate == 0 ) // means free bitrate (is unsupported yet)
return freeBitrate;
// sampling rate
bIndex = (BYTE)((dwHeader >> 10) & 0x03);
if( bIndex == 0x03 ) // all bits set is reserved
return headerCorrupt;
m_dwSamplesPerSec = m_dwSamplingRates[m_Version][bIndex];
// padding bit
m_dwPaddingSize = m_dwPaddingSizes[m_Layer] * ((dwHeader >> 9) & 0x01);
// calculate frame size
m_dwComputedFrameSize = (m_dwCoefficients[m_bLSF][m_Layer] * m_dwBitrate / m_dwSamplesPerSec) + m_dwPaddingSize;
m_dwSamplesPerFrame = m_dwSamplesPerFrames[m_bLSF][m_Layer];
if( !bSimpleDecode )
{
// private bit
m_bPrivate = (dwHeader >> 8) & 0x01;
// channel mode
m_ChannelMode = (ChannelMode)((dwHeader >> 6) & 0x03);
// mode extension (currently not used)
m_ModeExt = (BYTE)((dwHeader >> 4) & 0x03);
// copyright bit
m_bCopyright = (dwHeader >> 3) & 0x01;
// original bit
m_bOriginal = (dwHeader >> 2) & 0x01;
// emphasis
m_Emphasis = (Emphasis)(dwHeader & 0x03);
if( m_Emphasis == EmphReserved )
return headerCorrupt;
}
return noError;
}
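// Worked example (illustrative, not from the original source): for an MPEG 1, Layer III
// frame at 128 kbit/s and 44100 Hz with the padding bit clear:
//   m_dwComputedFrameSize = 144 * 128000 / 44100 + 0 = 417 bytes (integer division)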
CMPAHeader::HeaderError CMPAHeader::IsSync( uint32 dwOffset, bool bExtended )
{
HeaderError error = noSync;
uint32 dwHeader = m_pMPAFile->ExtractBytes( dwOffset, MPA_HEADER_SIZE, false );
// sync bytes found?
if( (dwHeader & 0xFFE00000) == 0xFFE00000 )
{
error = DecodeHeader( dwHeader );
if( error == noError )
{
// enough buffer to do extended check?
if( bExtended )
{
// recursive call (offset for next frame header)
uint32 dwOffset = m_dwSyncOffset+m_dwComputedFrameSize;
try
{
CMPAHeader m_SubsequentFrame( m_pMPAFile, dwOffset, true );
m_dwRealFrameSize = m_SubsequentFrame.m_dwSyncOffset - m_dwSyncOffset;
}
catch( CMPAException& Exc )
{
// could not find any subsequent frame, assume it is the last frame
if( Exc.GetErrorID() == CMPAException::NoFrame )
{
if( dwOffset + m_pMPAFile->m_dwBegin > m_pMPAFile->m_dwEnd )
m_dwRealFrameSize = m_pMPAFile->m_dwEnd - m_pMPAFile->m_dwBegin - m_dwSyncOffset;
else
m_dwRealFrameSize = m_dwComputedFrameSize;
error = noError;
}
else
error = noSync;
}
}
}
}
return error;
}
// CRC-16 lookup table
const uint16 CMPAHeader::wCRC16Table[256] =
{
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};


@@ -0,0 +1,116 @@
//====== Copyright 1996-2005, Valve Corporation, All rights reserved. =======
//
// Purpose:
//
//=============================================================================
#ifndef MPAHEADER_H
#define MPAHEADER_H
#ifdef _WIN32
#pragma once
#endif
#pragma once
#define MPA_HEADER_SIZE 4 // MPEG-Audio Header Size 32bit
#define MAXTIMESREAD 5
class CMPAFile;
class CMPAHeader
{
public:
CMPAHeader( CMPAFile* pMPAFile, uint32 dwExpectedOffset = 0, bool bSubsequentFrame = false, bool bReverse = false );
~CMPAHeader();
bool SkipEmptyFrames();
// bitrate is in bits per second; divide by 8 to get bytes per second
uint32 GetBytesPerSecond() const { return m_dwBitrate / 8; };
// calc number of seconds from number of frames
uint32 GetLengthSecond(uint32 dwNumFrames) const { return dwNumFrames * m_dwSamplesPerFrame / m_dwSamplesPerSec; };
uint32 GetBytesPerSecond( uint32 dwNumFrames, uint32 dwNumBytes ) const { return dwNumBytes / GetLengthSecond( dwNumFrames ); };
bool IsMono() const { return (m_ChannelMode == SingleChannel)?true:false; };
// true if MPEG2/2.5 otherwise false
bool IsLSF() const { return m_bLSF; };
private:
static const uint32 m_dwMaxRange;
static const uint32 m_dwTolerance;
static const uint32 m_dwSamplingRates[4][3];
static const uint32 m_dwPaddingSizes[3];
static const uint32 m_dwBitrates[2][3][15];
static const uint32 m_dwSamplesPerFrames[2][3];
static const uint32 m_dwCoefficients[2][3];
// necessary for CRC check (not yet implemented)
static const uint32 m_dwSideinfoSizes[2][3][2];
static const uint16 wCRC16Table[256];
bool m_bLSF; // true means lower sampling frequencies (=MPEG2/MPEG2.5)
CMPAFile* m_pMPAFile;
public:
static const char * m_szLayers[];
static const char * m_szMPEGVersions[];
static const char * m_szChannelModes[];
static const char * m_szEmphasis[];
enum MPAVersion
{
MPEG25 = 0,
MPEGReserved,
MPEG2,
MPEG1
}m_Version;
enum MPALayer
{
Layer1,
Layer2,
Layer3,
LayerReserved
}m_Layer;
enum Emphasis
{
EmphNone = 0,
Emph5015,
EmphReserved,
EmphCCITJ17
}m_Emphasis;
enum ChannelMode
{
Stereo,
JointStereo,
DualChannel,
SingleChannel
}m_ChannelMode;
uint32 m_dwSamplesPerSec;
uint32 m_dwSamplesPerFrame;
uint32 m_dwBitrate; // in bits per second (1 kbit = 1000 bits, not 1024)
uint32 m_dwSyncOffset;
uint32 m_dwComputedFrameSize, m_dwRealFrameSize;
uint32 m_dwPaddingSize;
// flags
bool m_bCopyright, m_bPrivate, m_bOriginal;
bool m_bCRC;
uint8 m_ModeExt;
private:
enum HeaderError
{
noError,
noSync,
freeBitrate,
headerCorrupt
};
HeaderError DecodeHeader( uint32 dwHeader, bool bSimpleDecode = false );
inline HeaderError IsSync( uint32 dwOffset, bool bExtended );
};
#endif // MPAHEADER_H


@@ -0,0 +1,101 @@
//===== Copyright © 1996-2005, Valve Corporation, All rights reserved. ======//
//
// Purpose:
//
// $Workfile: $
// $Date: $
// $NoKeywords: $
//===========================================================================//
#ifndef SND_AUDIO_SOURCE_H
#define SND_AUDIO_SOURCE_H
#ifdef _WIN32
#pragma once
#endif
#include "tier0/platform.h"
//-----------------------------------------------------------------------------
// Forward declarations
//-----------------------------------------------------------------------------
class CAudioSource;
class IAudioDevice;
struct channel_t;
//-----------------------------------------------------------------------------
// Purpose: This is an instance of an audio source.
// Mixers are attached to channels and reference an audio source.
// Mixers are specific to the sample format and source format.
// Mixers are never re-used, so they can track instance data like
// sample position, fractional sample, stream cache, faders, etc.
//-----------------------------------------------------------------------------
abstract_class CAudioMixer
{
public:
virtual ~CAudioMixer( void ) {}
// UNDONE: time compress
virtual bool MixDataToDevice( IAudioDevice *pDevice, channel_t *pChannel, int startSample, int sampleCount, int outputRate, bool forward = true ) = 0;
virtual void IncrementSamples( channel_t *pChannel, int startSample, int sampleCount,int outputRate, bool forward = true ) = 0;
virtual bool SkipSamples( IAudioDevice *pDevice, channel_t *pChannel, int startSample, int sampleCount, int outputRate, bool forward = true ) = 0;
virtual CAudioSource *GetSource( void ) = 0;
virtual int GetSamplePosition( void ) = 0;
virtual int GetScubPosition( void ) = 0;
virtual bool SetSamplePosition( int position, bool scrubbing = false ) = 0;
virtual void SetLoopPosition( int position ) = 0;
virtual int GetStartPosition( void ) = 0;
virtual bool GetActive( void ) = 0;
virtual void SetActive( bool active ) = 0;
virtual void SetModelIndex( int index ) = 0;
virtual int GetModelIndex( void ) const = 0;
virtual void SetDirection( bool forward ) = 0;
virtual bool GetDirection( void ) const = 0;
virtual void SetAutoDelete( bool autodelete ) = 0;
virtual bool GetAutoDelete( void ) const = 0;
virtual void SetVolume( float volume ) = 0;
virtual channel_t *GetChannel() = 0;
};
//-----------------------------------------------------------------------------
// Purpose: A source is an abstraction for a stream, cached file, or procedural
// source of audio.
//-----------------------------------------------------------------------------
class CSentence;
abstract_class CAudioSource
{
public:
CAudioSource( void );
virtual ~CAudioSource( void );
// Create an instance (mixer) of this audio source
virtual CAudioMixer *CreateMixer( void ) = 0;
virtual int GetOutputData( void **pData, int samplePosition, int sampleCount, bool forward = true ) = 0;
virtual int SampleRate( void ) = 0;
virtual int SampleSize( void ) = 0;
virtual int SampleCount( void ) = 0;
virtual float TrueSampleSize( void ) = 0;
virtual bool IsLooped( void ) = 0;
virtual bool IsStreaming( void ) = 0;
virtual float GetRunningLength( void ) = 0;
virtual int GetNumChannels() = 0;
virtual CSentence *GetSentence( void ) { return NULL; };
};
extern CAudioSource *AudioSource_Create( const char *pName );
#endif // SND_AUDIO_SOURCE_H


@@ -0,0 +1,105 @@
//===== Copyright © 1996-2005, Valve Corporation, All rights reserved. ======//
//
// Purpose:
//
// $Workfile: $
// $Date: $
// $NoKeywords: $
//===========================================================================//
#ifndef SND_DEVICE_H
#define SND_DEVICE_H
#ifdef _WIN32
#pragma once
#endif
#include "tier0/platform.h"
//-----------------------------------------------------------------------------
// 4.28 fixed point stuff for real-time resampling
//-----------------------------------------------------------------------------
#define FIX_BITS 28
#define FIX_SCALE (1 << FIX_BITS)
#define FIX_MASK ((1 << FIX_BITS)-1)
#define FIX_FLOAT(a) ((int)((a) * FIX_SCALE))
#define FIX(a) (((int)(a)) << FIX_BITS)
#define FIX_INTPART(a) (((int)(a)) >> FIX_BITS)
#define FIX_FRACTION(a,b) (FIX(a)/(b))
#define FIX_FRACPART(a) ((a) & FIX_MASK)
typedef unsigned int fixedint;
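// Illustrative sketch (added commentary, not part of the original header): how the 4.28
// fixed-point macros above are typically used to step through source samples during
// resampling. The function name, the linear interpolation, and the caller-managed
// sampleFrac accumulator are assumptions, not the engine's actual mixer code.
inline short FIX_ExampleResampleStep( const short *pSrc, fixedint rateScaleFix, fixedint &sampleFrac )
{
	int nIndex = FIX_INTPART( sampleFrac );		// whole-sample index into pSrc
	int nFrac = FIX_FRACPART( sampleFrac );		// 28-bit fractional position between samples
	// linear interpolation between the two neighboring source samples
	int nOut = pSrc[nIndex] + (int)( ( (int64)( pSrc[nIndex + 1] - pSrc[nIndex] ) * nFrac ) >> FIX_BITS );
	// advance by sourceRate/outputRate, e.g. rateScaleFix = FIX_FLOAT( (float)nSourceRate / (float)nOutputRate )
	sampleFrac += rateScaleFix;
	return (short)nOut;
}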
//-----------------------------------------------------------------------------
// sound rate defines
//-----------------------------------------------------------------------------
#define SOUND_DMA_SPEED 44100 // hardware playback rate
#define SOUND_11k 11025 // 11.025 kHz sample rate
#define SOUND_22k 22050 // 22.05 kHz sample rate
#define SOUND_44k 44100 // 44.1 kHz sample rate
#define SOUND_ALL_RATES 1 // mix all sample rates
//-----------------------------------------------------------------------------
// Information about the channel
//-----------------------------------------------------------------------------
struct channel_t
{
int leftvol;
int rightvol;
float pitch;
};
//-----------------------------------------------------------------------------
// The audio device is responsible for mixing
//-----------------------------------------------------------------------------
abstract_class IAudioDevice
{
public:
// This initializes the sound hardware. true on success, false on failure
virtual bool Init( void ) = 0;
// This releases all sound hardware
virtual void Shutdown( void ) = 0;
// device parameters
virtual const char *DeviceName( void ) const = 0;
virtual int DeviceChannels( void ) const = 0; // 1 = mono, 2 = stereo
virtual int DeviceSampleBits( void ) const = 0; // bits per sample (8 or 16)
virtual int DeviceSampleBytes( void ) const = 0; // above / 8
virtual int DeviceSampleRate( void ) const = 0; // Actual DMA speed
virtual int DeviceSampleCount( void ) const = 0; // Total samples in buffer
// Called each time a new paint buffer is mixed (may be multiple times per frame)
virtual void MixBegin( void ) = 0;
// Main mixing routines
virtual void Mix8Mono( channel_t *pChannel, char *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress, bool forward = true ) = 0;
virtual void Mix8Stereo( channel_t *pChannel, char *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress, bool forward = true ) = 0;
virtual void Mix16Mono( channel_t *pChannel, short *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress, bool forward = true ) = 0;
virtual void Mix16Stereo( channel_t *pChannel, short *pData, int outputOffset, int inputOffset, fixedint rateScaleFix, int outCount, int timecompress, bool forward = true ) = 0;
// Size of the paint buffer in samples
virtual int PaintBufferSampleCount( void ) const = 0;
// Adds a mixer to be mixed
virtual void AddSource( CAudioMixer *pSource ) = 0;
// Stops all sounds
virtual void StopSounds( void ) = 0;
// Updates sound mixing
virtual void Update( float time ) = 0;
// Resets the device
virtual void Flush( void ) = 0;
virtual int FindSourceIndex( CAudioMixer *pSource ) = 0;
virtual CAudioMixer *GetMixerForSource( CAudioSource *source ) = 0;
virtual void FreeChannel( int channelIndex ) = 0;
};
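// Illustrative sketch (added commentary, not part of the original header): the
// init/update/shutdown lifecycle implied by the interface above. The concrete device
// factory, the frame time, and the loop condition are assumptions for the example.
//
//   IAudioDevice *pDevice = CreateExampleAudioDevice();   // hypothetical factory
//   if ( pDevice && pDevice->Init() )
//   {
//       pDevice->AddSource( pMixer );                      // a CAudioMixer created from a CAudioSource
//       while ( bRunning )
//       {
//           pDevice->Update( flFrameTime );                // mixes active sources into the paint buffer
//       }
//       pDevice->StopSounds();
//       pDevice->Shutdown();
//   }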
#endif // SND_DEVICE_H

View File

@@ -0,0 +1,16 @@
//========= Copyright © 1996-2005, Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//=============================================================================//
#ifndef SND_IO_H
#define SND_IO_H
#pragma once
class IFileReadBinary;
extern IFileReadBinary *g_pSndIO;
#endif // SND_IO_H

View File

@@ -0,0 +1,51 @@
//========= Copyright © 1996-2005, Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//=============================================================================//
#ifndef SND_SFX_H
#define SND_SFX_H
#if defined( _WIN32 )
#pragma once
#endif
class CAudioSource;
class CSfxTable
{
public:
CSfxTable();
// gets the sound name, possibly decorated with prefixes
virtual const char *getname( char *pBuf, size_t bufLen );
// gets the filename, the part after the optional prefixes
const char *GetFileName(char *pBuf, size_t bufLen );
FileNameHandle_t GetFileNameHandle();
void SetNamePoolIndex( int index );
bool IsPrecachedSound();
void OnNameChanged( const char *pName );
int m_namePoolIndex;
CAudioSource *pSource;
bool m_bUseErrorFilename : 1;
bool m_bIsUISound : 1;
bool m_bIsLateLoad : 1;
bool m_bMixGroupsCached : 1;
bool m_bIsMusic : 1;
bool m_bIsCreatedByQueuedLoader : 1;
byte m_mixGroupCount;
// UNDONE: Use a fixed bit vec here?
byte m_mixGroupList[8];
private:
// Only set in debug mode so you can see the name.
const char *m_pDebugName;
};
#endif // SND_SFX_H

View File

@@ -0,0 +1,98 @@
//====== Copyright © 1996-2005, Valve Corporation, All rights reserved. =======
//
// Purpose:
//
//=============================================================================
#ifndef SNDINFO_H
#define SNDINFO_H
#ifdef _WIN32
#pragma once
#endif
class Vector;
#include "utlsymbol.h"
// Handy defines for EmitSound
#define SOUND_FROM_LOCAL_PLAYER -1
#define SOUND_FROM_WORLD 0
// These are used to feed a soundlevel to the sound system and have it use
// goldsrc-type attenuation. We should use this as little as possible and
// phase it out as soon as possible.
// Take a regular sndlevel and convert it to compatibility mode.
#define SNDLEVEL_TO_COMPATIBILITY_MODE( x ) ((soundlevel_t)(int)( (x) + 256 ))
// Take a compatibility-mode sndlevel and get the REAL sndlevel out of it.
#define SNDLEVEL_FROM_COMPATIBILITY_MODE( x ) ((soundlevel_t)(int)( (x) - 256 ))
// Tells if the given sndlevel is marked as compatibility mode.
#define SNDLEVEL_IS_COMPATIBILITY_MODE( x ) ( (x) >= 256 )
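// Example round trip through compatibility mode (added commentary, not part of the
// original header). SNDLVL_NORM (75) comes from soundflags.h elsewhere in the engine;
// its use here is an assumption.
//   soundlevel_t compat = SNDLEVEL_TO_COMPATIBILITY_MODE( SNDLVL_NORM );    // 75 -> 331, flagged as compatibility
//   bool bIsCompat = SNDLEVEL_IS_COMPATIBILITY_MODE( compat );              // true
//   soundlevel_t real = SNDLEVEL_FROM_COMPATIBILITY_MODE( compat );         // back to 75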
// Sound guids are assigned on the server starting at 1
// On the client, they are assigned by the sound system starting at 0x80000001
typedef uint32 SoundGuid_t;
#define INVALID_SOUND_GUID (SoundGuid_t)0
//-----------------------------------------------------------------------------
// Purpose: Client side only
//-----------------------------------------------------------------------------
struct SndInfo_t
{
// Sound Guid
SoundGuid_t m_nGuid;
FileNameHandle_t m_filenameHandle; // filesystem filename handle - call IFilesystem to convert this to a string
CEntityIndex m_nSoundSource;
int m_nChannel;
// If a sound is being played through a speaker entity (e.g., on a monitor), this is the
// entity upon which to show the lips moving, if the sound has sentence data
CEntityIndex m_nSpeakerEntity;
float m_flVolume;
float m_flLastSpatializedVolume;
// Radius of this sound effect (spatialization is different within the radius)
float m_flRadius;
int m_nPitch;
Vector *m_pOrigin;
Vector *m_pDirection;
// if true, assume sound source can move and update according to entity
bool m_bUpdatePositions;
// true if playing linked sentence
bool m_bIsSentence;
// if true, bypass all dsp processing for this sound (ie: music)
bool m_bDryMix;
// true if sound is playing through in-game speaker entity.
bool m_bSpeaker;
// for snd_show, networked sounds get colored differently than local sounds
bool m_bFromServer;
};
//-----------------------------------------------------------------------------
// Hearing info
//-----------------------------------------------------------------------------
struct AudioState_t
{
AudioState_t()
{
Clear();
}
void Clear()
{
m_Origin.Init();
m_Angles.Init();
m_nViewEntity.SetRaw( -1 );
m_bValid = false;
m_bIsUnderwater = false;
}
Vector m_Origin;
QAngle m_Angles;
CEntityIndex m_nViewEntity;
bool m_bIsUnderwater : 1;
bool m_bValid : 1;
};
#endif // SNDINFO_H

View File

@@ -0,0 +1,244 @@
//===================== Copyright (c) Valve Corporation. All Rights Reserved. ======================
//
//
//
//==================================================================================================
#ifndef SOUNDSCHEMA_H
#define SOUNDSCHEMA_H
#ifdef _WIN32
#pragma once
#endif
#ifndef SOURCE1
#include "resourcefile/resourcefile.h"
#include "resourcefile/resourcetype.h"
#endif
FORWARD_DECLARE_HANDLE( memhandle_t );
schema struct CEmphasisSample_t
{
public:
TYPEMETA( MNoScatter );
DECLARE_SCHEMA_DATA_CLASS(CEmphasisSample_t)
float32 time;
float32 value;
};
schema struct CBasePhonemeTag_t
{
public:
TYPEMETA( MNoScatter );
DECLARE_SCHEMA_DATA_CLASS(CBasePhonemeTag_t)
float GetStartTime() const { return m_flStartTime; }
float GetEndTime() const { return m_flEndTime; }
int GetPhonemeCode() const { return m_nPhonemeCode; }
public:
float32 m_flStartTime;
float32 m_flEndTime;
uint16 m_nPhonemeCode;
};
// A sentence can be closed captioned
// The default case is the entire sentence shown at start time
//
// "<persist:2.0><clr:255,0,0,0>The <I>default<I> case"
// "<sameline>is the <U>entire<U> sentence shown at <B>start time<B>"
// Commands that aren't closed at end of phrase are automatically terminated
//
// Commands
// <linger:2.0> The line should persist for 2.0 seconds beyond m_flEndTime
// <sameline> Don't go to new line for next phrase on stack
// <clr:r,g,b,a> Push current color onto stack and start drawing with new
// color until we reach the next <clr> marker or a <clr> with no commands which
// means restore the previous color
// <U> Underline text (start/end)
// <I> Italics text (start/end)
// <B> Bold text (start/end)
// <position:where> Draw caption at special location ??? needed
// <cr> Go to new line
// Close Captioning Support
// The phonemes drive the mouth in english, but the CC text can
// be one of several languages
//-----------------------------------------------------------------------------
// Purpose: A sentence is a box of words, and words contain phonemes
//-----------------------------------------------------------------------------
schema class CSentence_t
{
public:
TYPEMETA( MNoScatter );
DECLARE_SCHEMA_DATA_CLASS(CSentence_t)
inline float GetIntensity( float time, float endtime );
inline CEmphasisSample_t *GetBoundedSample( int number, float endtime );
int GetNumSamples( void ) { return m_EmphasisSamples.Count(); }
CEmphasisSample_t *GetSample( int index ) { return &m_EmphasisSamples[ index ]; }
bool GetVoiceDuck() const { return m_bShouldVoiceDuck; }
int GetRuntimePhonemeCount() const { return m_RunTimePhonemes.Count(); }
const CBasePhonemeTag_t *GetRuntimePhoneme( int i ) const { return &m_RunTimePhonemes[ i ]; }
public:
bool m_bShouldVoiceDuck;
CResourceArray< CBasePhonemeTag_t > m_RunTimePhonemes;
// Phoneme emphasis data
CResourceArray< CEmphasisSample_t > m_EmphasisSamples;
};
//-----------------------------------------------------------------------------
// Purpose:
// Input : number -
// Output : CEmphasisSample_t
//-----------------------------------------------------------------------------
inline CEmphasisSample_t *CSentence_t::GetBoundedSample( int number, float endtime )
{
// Search for two samples which span time f
static CEmphasisSample_t nullstart;
nullstart.time = 0.0f;
nullstart.value = 0.5f;
static CEmphasisSample_t nullend;
nullend.time = endtime;
nullend.value = 0.5f;
if ( number < 0 )
{
return &nullstart;
}
else if ( number >= GetNumSamples() )
{
return &nullend;
}
return GetSample( number );
}
//-----------------------------------------------------------------------------
// Purpose:
// Input : time -
// type -
// Output : float
//-----------------------------------------------------------------------------
inline float CSentence_t::GetIntensity( float time, float endtime )
{
float zeroValue = 0.5f;
int c = GetNumSamples();
if ( c <= 0 )
{
return zeroValue;
}
int i;
for ( i = -1 ; i < c; i++ )
{
CEmphasisSample_t *s = GetBoundedSample( i, endtime );
CEmphasisSample_t *n = GetBoundedSample( i + 1, endtime );
if ( !s || !n )
continue;
if ( time >= s->time && time <= n->time )
{
break;
}
}
int prev = i - 1;
int start = i;
int end = i + 1;
int next = i + 2;
prev = MAX( -1, prev );
start = MAX( -1, start );
end = MIN( end, GetNumSamples() );
next = MIN( next, GetNumSamples() );
CEmphasisSample_t *esPre = GetBoundedSample( prev, endtime );
CEmphasisSample_t *esStart = GetBoundedSample( start, endtime );
CEmphasisSample_t *esEnd = GetBoundedSample( end, endtime );
CEmphasisSample_t *esNext = GetBoundedSample( next, endtime );
float dt = esEnd->time - esStart->time;
dt = clamp( dt, 0.01f, 1.0f );
Vector vPre( esPre->time, esPre->value, 0 );
Vector vStart( esStart->time, esStart->value, 0 );
Vector vEnd( esEnd->time, esEnd->value, 0 );
Vector vNext( esNext->time, esNext->value, 0 );
float f2 = ( time - esStart->time ) / ( dt );
f2 = clamp( f2, 0.0f, 1.0f );
Vector vOut;
Catmull_Rom_Spline(
vPre,
vStart,
vEnd,
vNext,
f2,
vOut );
float retval = clamp( vOut.y, 0.0f, 1.0f );
return retval;
}
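// Illustrative sketch (added commentary, not part of the original header): sampling the
// emphasis track at a fixed interval, e.g. to drive mouth emphasis while a wave plays.
// The 10ms step and the flDuration source (VSound_t::m_flDuration below) are assumptions.
//   for ( float flTime = 0.0f; flTime < flDuration; flTime += 0.01f )
//   {
//       float flEmphasis = sentence.GetIntensity( flTime, flDuration );   // 0..1, 0.5 is neutral
//   }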
// Used for bitpacking
struct soundinfoheader_t
{
unsigned int m_Type : 2; // 0 1 2 or 3
unsigned int m_bits : 5; // 0 to 31
unsigned int m_channels : 2; // 1 or 2
unsigned int m_sampleSize : 3; // 1 2 or 4
unsigned int m_format : 2; // 1 == PCM, 2 == ADPCM
unsigned int m_rate : 17; // sample rate in Hz (17 bits: 0 to 131071)
};
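// Illustrative sketch (added commentary, not part of the original header): packing an
// example 16-bit stereo 44.1kHz PCM description into the bitfield above, mirroring the
// reinterpret that VSound_t::info() performs below. The field values are examples only.
inline uint32 PackExampleSoundInfo()
{
	soundinfoheader_t header = {};	// zero all bits, including the unused padding bit
	header.m_Type = 0;				// example type code
	header.m_bits = 16;				// bits per sample (fits in 5 bits)
	header.m_channels = 2;			// stereo
	header.m_sampleSize = 2;		// bytes per sample
	header.m_format = 1;			// 1 == PCM
	header.m_rate = 44100;			// sample rate in Hz (fits in 17 bits)
	return *(uint32 *)&header;		// same reinterpretation VSound_t uses for m_bitpackedsoundinfo
}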
schema struct VSound_t
{
DECLARE_SCHEMA_DATA_CLASS(VSound_t)
uint32 m_bitpackedsoundinfo;
const soundinfoheader_t &info() const { return *(soundinfoheader_t *)&m_bitpackedsoundinfo; };
int m_Type() const { return info().m_Type; };
int m_bits() const { return info().m_bits; };
int m_channels() const { return info().m_channels; };
int m_sampleSize() const { return info().m_sampleSize; };
int m_format() const { return info().m_format; };
int m_rate() const { return info().m_rate; };
int32 m_loopStart; // -1 for no loop
uint32 m_sampleCount;
float32 m_flDuration; // Duration in seconds
// Phoneme stream (optional)
CResourcePointer< CSentence_t > m_Sentence;
// Raw wave header (reinterpreted based on m_Type())
CResourceArray< uint8 > m_pHeader;
uint32 m_nStreamingDataSize;
// Any data after header is the raw sample data (PCM, ADPCM, .mp3, whatever)
uint32 m_nStreamingDataOffset; META( MNoSchema );
memhandle_t m_hStreamDataCacheHandle; META( MNoSchema );
};
#ifndef SOURCE1
#define RESOURCE_TYPE_SOUND RESOURCE_TYPE('s','n','d', 0)
DEFINE_RESOURCE_TYPE( VSound_t, RESOURCE_TYPE_SOUND, HSound, HSoundStrong );
#define SOUND_HANDLE_INVALID ( (HSound)0 )
#endif
#endif // SOUNDSCHEMA_H

View File

@@ -0,0 +1,307 @@
//====== Copyright 1996-2005, Valve Corporation, All rights reserved. =======
//
// Purpose:
//
//=============================================================================
#include "tier0/platform.h"
#include "mpafile.h" // also includes vbrheader.h
#include "tier0/dbg.h"
// NOTE: This has to be the last file included!
#include "tier0/memdbgon.h"
#ifndef MAKEFOURCC
#define MAKEFOURCC(ch0, ch1, ch2, ch3) \
((uint32)(BYTE)(ch0) | ((uint32)(BYTE)(ch1) << 8) | \
((uint32)(BYTE)(ch2) << 16) | ((uint32)(BYTE)(ch3) << 24 ))
#endif //defined(MAKEFOURCC)
// XING Header offset: 1. index = lsf, 2. index = mono
uint32 CVBRHeader::m_dwXINGOffsets[2][2] =
{
// MPEG 1 (not mono, mono)
{ 32 + MPA_HEADER_SIZE, 17 + MPA_HEADER_SIZE },
// MPEG 2/2.5
{ 17 + MPA_HEADER_SIZE, 9 + MPA_HEADER_SIZE }
};
// first test with this static method, if it does exist
bool CVBRHeader::IsVBRHeaderAvailable( CMPAFile* pMPAFile, VBRHeaderType& HeaderType, uint32& dwOffset )
{
Assert(pMPAFile);
// where does VBR header begin (XING)
uint32 dwNewOffset = dwOffset + m_dwXINGOffsets[pMPAFile->m_pMPAHeader->IsLSF()][pMPAFile->m_pMPAHeader->IsMono()];
// check for XING header first
if( CheckXING( pMPAFile, dwNewOffset ) )
{
HeaderType = XINGHeader;
// seek offset back to header begin
dwOffset = dwNewOffset - 4;
return true;
}
// VBRI header always at fixed offset
dwNewOffset = dwOffset + 32 + MPA_HEADER_SIZE;
if( CheckVBRI( pMPAFile, dwNewOffset ) )
{
HeaderType = VBRIHeader;
// seek offset back to header begin
dwOffset = dwNewOffset - 4;
return true;
}
HeaderType = NoHeader;
return false;
}
CVBRHeader::CVBRHeader( CMPAFile* pMPAFile, VBRHeaderType HeaderType, uint32 dwOffset ) :
m_pMPAFile( pMPAFile ), m_pnToc(NULL), m_HeaderType( HeaderType ), m_dwOffset(dwOffset), m_dwFrames(0), m_dwBytes(0)
{
switch( m_HeaderType )
{
case NoHeader:
// no Header found
throw CMPAException( CMPAException::NoVBRHeader, pMPAFile->GetFilename(), NULL, false );
break;
case XINGHeader:
if( !ExtractXINGHeader( m_dwOffset ) )
throw CMPAException( CMPAException::NoVBRHeader, pMPAFile->GetFilename(), NULL, false );
break;
case VBRIHeader:
if( !ExtractVBRIHeader( m_dwOffset ) )
throw CMPAException( CMPAException::NoVBRHeader, pMPAFile->GetFilename(), NULL, false );
break;
}
// calc bitrate
if( m_dwBytes > 0 && m_dwFrames > 0 )
{
// calc number of seconds
m_dwBytesPerSec = m_pMPAFile->m_pMPAHeader->GetBytesPerSecond( m_dwFrames, m_dwBytes );
}
else // incomplete header found
{
throw CMPAException( CMPAException::IncompleteVBRHeader, pMPAFile->GetFilename(), NULL, false );
}
}
bool CVBRHeader::CheckID( CMPAFile* pMPAFile, char ch0, char ch1, char ch2, char ch3, uint32& dwOffset )
{
return ( pMPAFile->ExtractBytes( dwOffset, 4 ) == MAKEFOURCC( ch3, ch2, ch1, ch0 ) );
}
bool CVBRHeader::CheckXING( CMPAFile* pMPAFile, uint32& dwOffset )
{
// XING ID found?
if( !CheckID( pMPAFile, 'X', 'i', 'n', 'g', dwOffset) && !CheckID( pMPAFile, 'I', 'n', 'f', 'o', dwOffset) )
return false;
return true;
}
bool CVBRHeader::CheckVBRI( CMPAFile* pMPAFile, uint32& dwOffset )
{
// VBRI ID found?
if( !CheckID( pMPAFile, 'V', 'B', 'R', 'I', dwOffset ) )
return false;
return true;
}
// currently not used
bool CVBRHeader::ExtractLAMETag( uint32 dwOffset )
{
// LAME ID found?
if( !CheckID( m_pMPAFile, 'L', 'A', 'M', 'E', dwOffset ) && !CheckID( m_pMPAFile, 'G', 'O', 'G', 'O', dwOffset ) )
return false;
return true;
}
bool CVBRHeader::ExtractXINGHeader( uint32 dwOffset )
{
/* XING VBR-Header
size description
4 'Xing' or 'Info'
4 flags (indicates which fields are used)
4 frames (optional)
4 bytes (optional)
100 toc (optional)
4 a VBR quality indicator: 0=best 100=worst (optional)
*/
if( !CheckXING( m_pMPAFile, dwOffset ) )
return false;
uint32 dwFlags;
// get flags (mandatory in XING header)
dwFlags = m_pMPAFile->ExtractBytes( dwOffset, 4 );
// extract total number of frames in file
if(dwFlags & FRAMES_FLAG)
m_dwFrames = m_pMPAFile->ExtractBytes(dwOffset,4);
// extract total number of bytes in file
if(dwFlags & BYTES_FLAG)
m_dwBytes = m_pMPAFile->ExtractBytes(dwOffset,4);
// extract TOC (for more accurate seeking)
if (dwFlags & TOC_FLAG)
{
m_dwTableSize = 100;
m_pnToc = new int[m_dwTableSize];
if( m_pnToc )
{
for(uint32 i=0;i<m_dwTableSize;i++)
m_pnToc[i] = m_pMPAFile->ExtractBytes( dwOffset, 1 );
}
}
m_dwQuality = (uint32)-1;
if(dwFlags & VBR_SCALE_FLAG )
m_dwQuality = m_pMPAFile->ExtractBytes(dwOffset, 4);
return true;
}
bool CVBRHeader::ExtractVBRIHeader( uint32 dwOffset )
{
/* FhG VBRI Header
size description
4 'VBRI' (ID)
2 version
2 delay
2 quality
4 # bytes
4 # frames
2 table size (for TOC)
2 table scale (for TOC)
2 size of table entry (max. size = 4 byte (must be stored in an integer))
2 frames per table entry
?? dynamic table consisting out of frames with size 1-4
whole length in table size! (for TOC)
*/
if( !CheckVBRI( m_pMPAFile, dwOffset ) )
return false;
// extract all fields from header (all mandatory)
m_dwVersion = m_pMPAFile->ExtractBytes(dwOffset, 2 );
m_fDelay = (float)m_pMPAFile->ExtractBytes(dwOffset, 2 );
m_dwQuality = m_pMPAFile->ExtractBytes(dwOffset, 2 );
m_dwBytes = m_pMPAFile->ExtractBytes(dwOffset, 4 );
m_dwFrames = m_pMPAFile->ExtractBytes(dwOffset, 4 );
m_dwTableSize = m_pMPAFile->ExtractBytes(dwOffset, 2 ) + 1; //!!!
m_dwTableScale = m_pMPAFile->ExtractBytes(dwOffset, 2 );
m_dwBytesPerEntry = m_pMPAFile->ExtractBytes(dwOffset, 2 );
m_dwFramesPerEntry = m_pMPAFile->ExtractBytes(dwOffset, 2 );
// extract TOC (for more accurate seeking)
m_pnToc = new int[m_dwTableSize];
if( m_pnToc )
{
for ( unsigned int i = 0 ; i < m_dwTableSize ; i++)
{
m_pnToc[i] = m_pMPAFile->ExtractBytes(dwOffset, m_dwBytesPerEntry );
}
}
return true;
}
CVBRHeader::~CVBRHeader(void)
{
if( m_pnToc )
delete[] m_pnToc;
}
// get byte position for percentage value (fPercent) of file
bool CVBRHeader::SeekPoint(float fPercent, uint32& dwSeekPoint)
{
if( !m_pnToc || m_dwBytes == 0 )
return false;
if( fPercent < 0.0f )
fPercent = 0.0f;
if( fPercent > 100.0f )
fPercent = 100.0f;
switch( m_HeaderType )
{
case XINGHeader:
dwSeekPoint = SeekPointXING( fPercent );
break;
case VBRIHeader:
dwSeekPoint = SeekPointVBRI( fPercent );
break;
}
return true;
}
uint32 CVBRHeader::SeekPointXING(float fPercent) const
{
// interpolate in TOC to get file seek point in bytes
int a;
float fa, fb, fx;
a = (int)fPercent;
if( a > 99 ) a = 99;
fa = (float)m_pnToc[a];
if( a < 99 )
{
fb = (float)m_pnToc[a+1];
}
else
{
fb = 256.0f;
}
fx = fa + (fb-fa)*(fPercent-a);
uint32 dwSeekpoint = (int)((1.0f/256.0f)*fx*m_dwBytes);
return dwSeekpoint;
}
uint32 CVBRHeader::SeekPointVBRI(float fPercent) const
{
return SeekPointByTimeVBRI( (fPercent/100.0f) * m_pMPAFile->m_pMPAHeader->GetLengthSecond( m_dwFrames ) * 1000.0f );
}
uint32 CVBRHeader::SeekPointByTimeVBRI(float fEntryTimeMS) const
{
unsigned int i=0, fraction = 0;
uint32 dwSeekPoint = 0;
float fLengthMS;
float fLengthMSPerTOCEntry;
float fAccumulatedTimeMS = 0.0f ;
fLengthMS = (float)m_pMPAFile->m_pMPAHeader->GetLengthSecond( m_dwFrames ) * 1000.0f ;
fLengthMSPerTOCEntry = fLengthMS / (float)m_dwTableSize;
if ( fEntryTimeMS > fLengthMS )
fEntryTimeMS = fLengthMS;
while ( fAccumulatedTimeMS <= fEntryTimeMS )
{
dwSeekPoint += m_pnToc[i++];
fAccumulatedTimeMS += fLengthMSPerTOCEntry;
}
// Searched too far; correct result
fraction = ( (int)(((( fAccumulatedTimeMS - fEntryTimeMS ) / fLengthMSPerTOCEntry )
+ (1.0f/(2.0f*(float)m_dwFramesPerEntry))) * (float)m_dwFramesPerEntry));
dwSeekPoint -= (uint32)((float)m_pnToc[i-1] * (float)(fraction)
/ (float)m_dwFramesPerEntry);
return dwSeekPoint;
}
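// Illustrative sketch (added commentary, not part of the original file): the detection /
// construction / seek sequence the functions above support. The pMPAFile setup, the
// first-frame offset, and the 50 percent seek target are assumptions for the example.
//
//   CVBRHeader::VBRHeaderType headerType;
//   uint32 dwOffset = dwFirstFrameOffset;                        // offset of the first MPEG frame header
//   if ( CVBRHeader::IsVBRHeaderAvailable( pMPAFile, headerType, dwOffset ) )
//   {
//       CVBRHeader vbrHeader( pMPAFile, headerType, dwOffset );  // throws CMPAException on a missing/incomplete header
//       uint32 dwSeekPoint;
//       if ( vbrHeader.SeekPoint( 50.0f, dwSeekPoint ) )          // byte offset of roughly the middle of the audio
//       {
//           // seek the file reader to dwSeekPoint and resume decoding there
//       }
//   }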

View File

@@ -0,0 +1,73 @@
//====== Copyright 1996-2005, Valve Corporation, All rights reserved. =======
//
// Purpose:
//
//=============================================================================
#ifndef VBRHEADER_H
#define VBRHEADER_H
#ifdef _WIN32
#pragma once
#endif
#include "tier0/platform.h"
// for XING VBR Header flags
#define FRAMES_FLAG 0x0001
#define BYTES_FLAG 0x0002
#define TOC_FLAG 0x0004
#define VBR_SCALE_FLAG 0x0008
class CMPAFile;
class CVBRHeader
{
public:
enum VBRHeaderType
{
NoHeader,
XINGHeader,
VBRIHeader
};
CVBRHeader( CMPAFile* pMPAFile, VBRHeaderType HeaderType, uint32 dwOffset );
~CVBRHeader(void);
static bool IsVBRHeaderAvailable( CMPAFile* pMPAFile, VBRHeaderType& HeaderType, uint32& dwOffset );
bool SeekPoint(float fPercent, uint32& dwSeekPoint);
uint32 m_dwBytesPerSec;
uint32 m_dwBytes; // total number of bytes
uint32 m_dwFrames; // total number of frames
private:
static uint32 m_dwXINGOffsets[2][2];
static bool CheckID( CMPAFile* pMPAFile, char ch0, char ch1, char ch2, char ch3, uint32& dwOffset );
static bool CheckXING( CMPAFile* pMPAFile, uint32& dwOffset );
static bool CheckVBRI( CMPAFile* pMPAFile, uint32& dwOffset );
bool ExtractLAMETag( uint32 dwOffset );
bool ExtractXINGHeader( uint32 dwOffset );
bool ExtractVBRIHeader( uint32 dwOffset );
uint32 SeekPointXING(float fPercent)const ;
uint32 SeekPointVBRI(float fPercent) const;
uint32 SeekPointByTimeVBRI(float fEntryTimeMS) const;
CMPAFile* m_pMPAFile;
public:
VBRHeaderType m_HeaderType;
uint32 m_dwOffset;
uint32 m_dwQuality; // quality (0..100)
int* m_pnToc; // TOC points for seeking (must be freed)
uint32 m_dwTableSize; // size of table (number of entries)
// only VBRI
float m_fDelay;
uint32 m_dwTableScale; // for seeking
uint32 m_dwBytesPerEntry;
uint32 m_dwFramesPerEntry;
uint32 m_dwVersion;
};
#endif // VBRHEADER_H

public/soundsystem/voice.h
View File

@@ -0,0 +1,112 @@
//========= Copyright © 1996-2005, Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//=============================================================================//
#ifndef VOICE_H
#define VOICE_H
#pragma once
/*! @defgroup Voice Voice
Defines the engine's interface to the voice code.
@{
*/
#define VOICE_OUTPUT_SAMPLE_RATE 11025 // Sample rate that we feed to the mixer.
//! Returned on error from certain voice functions.
#define VOICE_CHANNEL_ERROR -1
#define VOICE_CHANNEL_IN_TWEAK_MODE -2 // Returned by AssignChannel if currently in tweak mode (not an error).
//! Initialize the voice code.
bool Voice_Init();
//! Force Initialization with default codec.
void Voice_ForceInit();
//! Shutdown the voice code.
void Voice_Deinit();
//! Returns true if the client has voice enabled
bool Voice_Enabled( void );
//! Returns true if the user can hear themselves speak.
bool Voice_GetLoopback();
//! This is called periodically by the engine when the server acks the local player talking.
//! This tells the client DLL that the local player is talking and fades after about 200ms.
void Voice_LocalPlayerTalkingAck( CSplitScreenSlot iSsSlot );
//! Call every frame to update the voice stuff.
bool Voice_Idle(float frametime);
//! Returns true if mic input is currently being recorded.
bool Voice_IsRecording( CSplitScreenSlot nSplitScreenSlot );
//! Begin recording input from the mic.
bool Voice_RecordStart(
CSplitScreenSlot nSplitScreenSlot,
//! Filename to store incoming mic data, or NULL if none.
const char *pUncompressedFile,
//! Filename to store the output of compression and decompression with the codec, or NULL if none.
const char *pDecompressedFile,
//! If this is non-null, the voice manager will use this file for input instead of the mic.
const char *pMicInputFile
);
//! Stop recording from the mic.
bool Voice_RecordStop( CSplitScreenSlot nSplitScreenSlot );
//! Get the most recent N bytes of compressed data. If nCount is less than the number of
//! available bytes, it discards the first bytes and gives you the last ones.
//! Set bFinal to true on the last call to this (it will flush out any stored voice data).
int Voice_GetCompressedData( CSplitScreenSlot nSplitScreenSlot, char *pchData, int nCount, bool bFinal);
//! Pass incoming data from the server into here.
//! The data should have been compressed and obtained through a Voice_GetCompressedData call (see the sketch at the end of this header).
int Voice_AddIncomingData(
//! Channel index.
int nChannel,
//! Compressed data to add to the channel.
const char *pchData,
//! Number of bytes in pchData.
int nCount,
//! Sequence number. If a packet is missed, it adds padding so the time isn't squashed.
int iSequenceNumber,
//! Was this data compressed?
bool isCompressed = true
);
#define VOICE_TIME_PADDING 0.2f // Time between receiving the first voice packet and actually starting
// to play the sound. This accounts for frametime differences on the clients
// and the server.
//! Call this to reserve a voice channel for the specified entity to talk into.
//! \return A channel index for use with Voice_AddIncomingData or VOICE_CHANNEL_ERROR on error.
int Voice_AssignChannel(int nEntity, bool bProximity, float timePadding = VOICE_TIME_PADDING );
//! Call this to get the channel index that the specified entity is talking into.
//! \return A channel index for use with Voice_AddIncomingData or VOICE_CHANNEL_ERROR if the entity isn't talking.
int Voice_GetChannel(int nEntity);
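// Illustrative sketch (added commentary, not part of the original header): the expected
// round trip for voice data. The buffer size, the split-screen slot variable, the network
// send call, and the sequence number source are assumptions for the example.
//
//   // client, while recording:
//   char chCompressed[ 4096 ];
//   int nBytes = Voice_GetCompressedData( nSlot, chCompressed, sizeof( chCompressed ), false );
//   if ( nBytes > 0 )
//       SendVoiceDataToServer( chCompressed, nBytes );            // hypothetical network send
//
//   // client, for data relayed from another player's entity:
//   int nChannel = Voice_AssignChannel( nSpeakerEntity, true );
//   if ( nChannel != VOICE_CHANNEL_ERROR )
//       Voice_AddIncomingData( nChannel, chCompressed, nBytes, iSequenceNumber );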
/*! @} */
#endif // VOICE_H