/**********************************************************************************************
*
*   raudio v1.0 - A simple and easy-to-use audio library based on miniaudio
*
*   FEATURES:
*       - Manage audio device (init/close)
*       - Manage raw audio context
*       - Manage mixing channels
*       - Load and unload audio files
*       - Format wave data (sample rate, size, channels)
*       - Play/Stop/Pause/Resume loaded audio
*
*   CONFIGURATION:
*
*   #define RAUDIO_STANDALONE
*       Define to use the module as standalone library (independently of raylib).
*       Required types and functions are defined in the same module.
*
*   #define SUPPORT_FILEFORMAT_WAV
*   #define SUPPORT_FILEFORMAT_OGG
*   #define SUPPORT_FILEFORMAT_XM
*   #define SUPPORT_FILEFORMAT_MOD
*   #define SUPPORT_FILEFORMAT_FLAC
*   #define SUPPORT_FILEFORMAT_MP3
*       Select desired fileformats to be supported for loading. Some of those formats are
*       supported by default; to remove support, just comment the unrequired #define in this module.
*
*   DEPENDENCIES:
*       miniaudio.h  - Audio device management lib (https://github.com/dr-soft/miniaudio)
*       stb_vorbis.h - Ogg audio files loading (http://www.nothings.org/stb_vorbis/)
*       dr_mp3.h     - MP3 audio file loading (https://github.com/mackron/dr_libs)
*       dr_flac.h    - FLAC audio file loading (https://github.com/mackron/dr_libs)
*       jar_xm.h     - XM module file loading
*       jar_mod.h    - MOD audio file loading
*
*   CONTRIBUTORS:
*       David Reid (github: @mackron) (Nov. 2017):
*           - Complete port to miniaudio library
*
*       Joshua Reisenauer (github: @kd7tck) (2015)
*           - XM audio module support (jar_xm)
*           - MOD audio module support (jar_mod)
*           - Mixing channels support
*           - Raw audio context support
*
*
*   LICENSE: zlib/libpng
*
*   Copyright (c) 2013-2021 Ramon Santamaria (@raysan5)
*
*   This software is provided "as-is", without any express or implied warranty. In no event
*   will the authors be held liable for any damages arising from the use of this software.
*
*   Permission is granted to anyone to use this software for any purpose, including commercial
*   applications, and to alter it and redistribute it freely, subject to the following restrictions:
*
*     1. The origin of this software must not be misrepresented; you must not claim that you
*     wrote the original software. If you use this software in a product, an acknowledgment
*     in the product documentation would be appreciated but is not required.
*
*     2. Altered source versions must be plainly marked as such, and must not be misrepresented
*     as being the original software.
*
*     3. This notice may not be removed or altered from any source distribution.
*
**********************************************************************************************/
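
// A minimal usage sketch of this module's public API (illustrative only; it assumes the module is
// built as part of raylib or with RAUDIO_STANDALONE, that WAV support is enabled via
// SUPPORT_FILEFORMAT_WAV, and that a "sound.wav" file exists next to the executable):
//
//     InitAudioDevice();                      // Initialize audio device and context
//     Sound fx = LoadSound("sound.wav");      // Load sound data into memory (no streaming)
//     PlaySound(fx);                          // Start playback; mixing runs on the audio thread
//     while (IsSoundPlaying(fx)) { }          // Busy-wait, just for illustration purposes
//     UnloadSound(fx);                        // Unload sound data from RAM
//     CloseAudioDevice();                     // Close audio device and context
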
#if defined(RAUDIO_STANDALONE)
    #include "raudio.h"
    #include <stdarg.h>         // Required for: va_list, va_start(), vfprintf(), va_end()
#else
    #include "raylib.h"         // Declares module functions

    // Check if config flags have been externally provided on compilation line
    #if !defined(EXTERNAL_CONFIG_FLAGS)
        #include "config.h"     // Defines module configuration flags
    #endif
    #include "utils.h"          // Required for: fopen() Android mapping
#endif

#if defined(_WIN32)
    // To avoid conflicting windows.h symbols with raylib, some flags are defined
    // WARNING: Those flags avoid inclusion of some Win32 headers that could be required
    // by user at some point and won't be included...
    //-------------------------------------------------------------------------------------
    // If defined, the following flags inhibit definition of the indicated items.
    #define NOGDICAPMASKS     // CC_*, LC_*, PC_*, CP_*, TC_*, RC_
    #define NOVIRTUALKEYCODES // VK_*
    #define NOWINMESSAGES     // WM_*, EM_*, LB_*, CB_*
    #define NOWINSTYLES       // WS_*, CS_*, ES_*, LBS_*, SBS_*, CBS_*
    #define NOSYSMETRICS      // SM_*
    #define NOMENUS           // MF_*
    #define NOICONS           // IDI_*
    #define NOKEYSTATES       // MK_*
    #define NOSYSCOMMANDS     // SC_*
    #define NORASTEROPS       // Binary and Tertiary raster ops
    #define NOSHOWWINDOW      // SW_*
    #define OEMRESOURCE       // OEM Resource values
    #define NOATOM            // Atom Manager routines
    #define NOCLIPBOARD       // Clipboard routines
    #define NOCOLOR           // Screen colors
    #define NOCTLMGR          // Control and Dialog routines
    #define NODRAWTEXT        // DrawText() and DT_*
    #define NOGDI             // All GDI defines and routines
    #define NOKERNEL          // All KERNEL defines and routines
    #define NOUSER            // All USER defines and routines
    //#define NONLS             // All NLS defines and routines
    #define NOMB              // MB_* and MessageBox()
    #define NOMEMMGR          // GMEM_*, LMEM_*, GHND, LHND, associated routines
    #define NOMETAFILE        // typedef METAFILEPICT
    #define NOMINMAX          // Macros min(a,b) and max(a,b)
    #define NOMSG             // typedef MSG and associated routines
    #define NOOPENFILE        // OpenFile(), OemToAnsi, AnsiToOem, and OF_*
    #define NOSCROLL          // SB_* and scrolling routines
    #define NOSERVICE         // All Service Controller routines, SERVICE_ equates, etc.
    #define NOSOUND           // Sound driver routines
    #define NOTEXTMETRIC      // typedef TEXTMETRIC and associated routines
    #define NOWH              // SetWindowsHook and WH_*
    #define NOWINOFFSETS      // GWL_*, GCL_*, associated routines
    #define NOCOMM            // COMM driver routines
    #define NOKANJI           // Kanji support stuff.
    #define NOHELP            // Help engine interface.
    #define NOPROFILER        // Profiler interface.
    #define NODEFERWINDOWPOS  // DeferWindowPos routines
    #define NOMCX             // Modem Configuration Extensions

    // Type required before windows.h inclusion
    typedef struct tagMSG *LPMSG;

    #include <windows.h>

    // Type required by some unused function...
    typedef struct tagBITMAPINFOHEADER {
        DWORD biSize;
        LONG biWidth;
        LONG biHeight;
        WORD biPlanes;
        WORD biBitCount;
        DWORD biCompression;
        DWORD biSizeImage;
        LONG biXPelsPerMeter;
        LONG biYPelsPerMeter;
        DWORD biClrUsed;
        DWORD biClrImportant;
    } BITMAPINFOHEADER, *PBITMAPINFOHEADER;

    #include <objbase.h>
    #include <mmreg.h>
    #include <mmsystem.h>

    // Some required types defined for MSVC/TinyC compiler
    #if defined(_MSC_VER) || defined(__TINYC__)
        #include "propidl.h"
    #endif
#endif

#define MA_MALLOC RL_MALLOC
#define MA_FREE RL_FREE

#define MA_NO_JACK
#define MA_NO_WAV
#define MA_NO_FLAC
#define MA_NO_MP3
#define MINIAUDIO_IMPLEMENTATION
//#define MA_DEBUG_OUTPUT
#include "external/miniaudio.h"         // miniaudio library
#undef PlaySound                        // Win32 API: windows.h > mmsystem.h defines PlaySound macro

#include <stdlib.h>                     // Required for: malloc(), free()
#include <stdio.h>                      // Required for: FILE, fopen(), fclose(), fread()

#if defined(RAUDIO_STANDALONE)
    #include <string.h>                 // Required for: strcmp() [Used in IsFileExtension()]

    #if !defined(TRACELOG)
        #define TRACELOG(level, ...) (void)0
    #endif

    // Allow custom memory allocators
    #ifndef RL_MALLOC
        #define RL_MALLOC(sz)       malloc(sz)
    #endif
    #ifndef RL_CALLOC
        #define RL_CALLOC(n,sz)     calloc(n,sz)
    #endif
    #ifndef RL_REALLOC
        #define RL_REALLOC(ptr,sz)  realloc(ptr,sz)
    #endif
    #ifndef RL_FREE
        #define RL_FREE(ptr)        free(ptr)
    #endif
#endif

#if defined(SUPPORT_FILEFORMAT_OGG)
    // TODO: Remap malloc()/free() calls to RL_MALLOC/RL_FREE
    #define STB_VORBIS_IMPLEMENTATION
    #include "external/stb_vorbis.h"    // OGG loading functions
#endif

#if defined(SUPPORT_FILEFORMAT_XM)
    #define JARXM_MALLOC RL_MALLOC
    #define JARXM_FREE RL_FREE

    #define JAR_XM_IMPLEMENTATION
    #include "external/jar_xm.h"        // XM loading functions
#endif

#if defined(SUPPORT_FILEFORMAT_MOD)
    #define JARMOD_MALLOC RL_MALLOC
    #define JARMOD_FREE RL_FREE

    #define JAR_MOD_IMPLEMENTATION
    #include "external/jar_mod.h"       // MOD loading functions
#endif

#if defined(SUPPORT_FILEFORMAT_WAV)
    #define DRWAV_MALLOC RL_MALLOC
    #define DRWAV_REALLOC RL_REALLOC
    #define DRWAV_FREE RL_FREE

    #define DR_WAV_IMPLEMENTATION
    #include "external/dr_wav.h"        // WAV loading functions
#endif

#if defined(SUPPORT_FILEFORMAT_MP3)
    #define DRMP3_MALLOC RL_MALLOC
    #define DRMP3_REALLOC RL_REALLOC
    #define DRMP3_FREE RL_FREE

    #define DR_MP3_IMPLEMENTATION
    #include "external/dr_mp3.h"        // MP3 loading functions
#endif

#if defined(SUPPORT_FILEFORMAT_FLAC)
    #define DRFLAC_MALLOC RL_MALLOC
    #define DRFLAC_REALLOC RL_REALLOC
    #define DRFLAC_FREE RL_FREE

    #define DR_FLAC_IMPLEMENTATION
    #define DR_FLAC_NO_WIN32_IO
    #include "external/dr_flac.h"       // FLAC loading functions
#endif

#if defined(_MSC_VER)
    #undef bool
#endif

//----------------------------------------------------------------------------------
// Defines and Macros
//----------------------------------------------------------------------------------
#ifndef AUDIO_DEVICE_FORMAT
    #define AUDIO_DEVICE_FORMAT    ma_format_f32    // Device output format (float-32bit)
#endif
#ifndef AUDIO_DEVICE_CHANNELS
    #define AUDIO_DEVICE_CHANNELS              2    // Device output channels: stereo
#endif
#ifndef AUDIO_DEVICE_SAMPLE_RATE
    #define AUDIO_DEVICE_SAMPLE_RATE           0    // Device output sample rate
#endif
#ifndef MAX_AUDIO_BUFFER_POOL_CHANNELS
    #define MAX_AUDIO_BUFFER_POOL_CHANNELS    16    // Audio pool channels
#endif
#ifndef DEFAULT_AUDIO_BUFFER_SIZE
    #define DEFAULT_AUDIO_BUFFER_SIZE       4096    // Default audio buffer size
#endif
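
// NOTE: The defines above are wrapped in #ifndef guards, so they can be overridden at compile time,
// for example (illustrative values only): cc -c raudio.c -DMAX_AUDIO_BUFFER_POOL_CHANNELS=32 -DDEFAULT_AUDIO_BUFFER_SIZE=8192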

//----------------------------------------------------------------------------------
// Types and Structures Definition
//----------------------------------------------------------------------------------

// Music context type
// NOTE: Depends on data structure provided by the library
// in charge of reading the different file types
typedef enum {
    MUSIC_AUDIO_NONE = 0,   // No audio context loaded
    MUSIC_AUDIO_WAV,        // WAV audio context
    MUSIC_AUDIO_OGG,        // OGG audio context
    MUSIC_AUDIO_FLAC,       // FLAC audio context
    MUSIC_AUDIO_MP3,        // MP3 audio context
    MUSIC_MODULE_XM,        // XM module audio context
    MUSIC_MODULE_MOD        // MOD module audio context
} MusicContextType;

#if defined(RAUDIO_STANDALONE)
// Trace log level
// NOTE: Organized by priority level
typedef enum {
    LOG_ALL = 0,        // Display all logs
    LOG_TRACE,          // Trace logging, intended for internal use only
    LOG_DEBUG,          // Debug logging, used for internal debugging, it should be disabled on release builds
    LOG_INFO,           // Info logging, used for program execution info
    LOG_WARNING,        // Warning logging, used on recoverable failures
    LOG_ERROR,          // Error logging, used on unrecoverable failures
    LOG_FATAL,          // Fatal logging, used to abort program: exit(EXIT_FAILURE)
    LOG_NONE            // Disable logging
} TraceLogLevel;
#endif

// NOTE: Different logic is used when feeding data to the playback device
// depending on whether or not data is streamed (Music vs Sound)
typedef enum {
    AUDIO_BUFFER_USAGE_STATIC = 0,
    AUDIO_BUFFER_USAGE_STREAM
} AudioBufferUsage;

// Audio buffer structure
struct rAudioBuffer {
    ma_data_converter converter;    // Audio data converter

    float volume;                   // Audio buffer volume
    float pitch;                    // Audio buffer pitch
    bool playing;                   // Audio buffer state: AUDIO_PLAYING
    bool paused;                    // Audio buffer state: AUDIO_PAUSED
    bool looping;                   // Audio buffer looping, always true for AudioStreams
    int usage;                      // Audio buffer usage mode: STATIC or STREAM

    bool isSubBufferProcessed[2];   // SubBuffer processed (virtual double buffer)
    unsigned int sizeInFrames;      // Total buffer size in frames
    unsigned int frameCursorPos;    // Frame cursor position
    unsigned int totalFramesProcessed;  // Total frames processed in this buffer (required for play timing)

    unsigned char *data;            // Data buffer, on music stream keeps filling
    rAudioBuffer *next;             // Next audio buffer on the list
    rAudioBuffer *prev;             // Previous audio buffer on the list
};

#define AudioBuffer rAudioBuffer    // HACK: To avoid CoreAudio (macOS) symbol collision

// Audio data context
typedef struct AudioData {
    struct {
        ma_context context;         // miniaudio context data
        ma_device device;           // miniaudio device
        ma_mutex lock;              // miniaudio mutex lock
        bool isReady;               // Check if audio device is ready
    } System;
    struct {
        AudioBuffer *first;         // Pointer to first AudioBuffer in the list
        AudioBuffer *last;          // Pointer to last AudioBuffer in the list
        int defaultSize;            // Default audio buffer size for audio streams
    } Buffer;
    struct {
        unsigned int poolCounter;                               // AudioBuffer pointers pool counter
        AudioBuffer *pool[MAX_AUDIO_BUFFER_POOL_CHANNELS];      // Multichannel AudioBuffer pointers pool
        unsigned int channels[MAX_AUDIO_BUFFER_POOL_CHANNELS];  // AudioBuffer pool channels
    } MultiChannel;
} AudioData;

//----------------------------------------------------------------------------------
// Global Variables Definition
//----------------------------------------------------------------------------------
static AudioData AUDIO = {          // Global AUDIO context
    // NOTE: Music buffer size is defined by number of samples, independent of sample size and channels number
    // After some math, considering a sampleRate of 48000, a buffer refill rate of 1/60 seconds and a
    // standard double-buffering system, a 4096 samples buffer has been chosen, it should be enough
    // In case of music-stalls, just increase this number
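    // Worked example of that estimate (illustrative only):
    //   48000 samples/sec / 60 refills/sec = 800 samples needed per refill
    //   800 x 2 sub-buffers (double-buffering) = 1600 samples minimum
    //   4096 is the next comfortable power-of-two above that, leaving extra headroom against stalls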
    .Buffer.defaultSize = 0
};

//----------------------------------------------------------------------------------
// Module specific Functions Declaration
//----------------------------------------------------------------------------------
static void OnLog(ma_context *pContext, ma_device *pDevice, ma_uint32 logLevel, const char *message);
static void OnSendAudioDataToDevice(ma_device *pDevice, void *pFramesOut, const void *pFramesInput, ma_uint32 frameCount);
static void MixAudioFrames(float *framesOut, const float *framesIn, ma_uint32 frameCount, float localVolume);

#if defined(SUPPORT_FILEFORMAT_WAV)
static Wave LoadWAV(const unsigned char *fileData, unsigned int fileSize);      // Load WAV file
static int SaveWAV(Wave wave, const char *fileName);                            // Save wave data as WAV file
#endif
#if defined(SUPPORT_FILEFORMAT_OGG)
static Wave LoadOGG(const unsigned char *fileData, unsigned int fileSize);      // Load OGG file
#endif
#if defined(SUPPORT_FILEFORMAT_FLAC)
static Wave LoadFLAC(const unsigned char *fileData, unsigned int fileSize);     // Load FLAC file
#endif
#if defined(SUPPORT_FILEFORMAT_MP3)
static Wave LoadMP3(const unsigned char *fileData, unsigned int fileSize);      // Load MP3 file
#endif

#if defined(RAUDIO_STANDALONE)
static bool IsFileExtension(const char *fileName, const char *ext);             // Check file extension
static const char *GetFileExtension(const char *fileName);                      // Get pointer to extension for a filename string (includes the dot: .png)
static bool TextIsEqual(const char *text1, const char *text2);                  // Check if two text string are equal
static const char *TextToLower(const char *text);                               // Get lower case version of provided string

static unsigned char *LoadFileData(const char *fileName, unsigned int *bytesRead);      // Load file data as byte array (read)
static bool SaveFileData(const char *fileName, void *data, unsigned int bytesToWrite);  // Save data to file from byte array (write)
static bool SaveFileText(const char *fileName, char *text);                     // Save text data to file (write), string must be '\0' terminated
#endif

//----------------------------------------------------------------------------------
// AudioBuffer management functions declaration
// NOTE: Those functions are not exposed by raylib... for the moment
//----------------------------------------------------------------------------------
AudioBuffer *LoadAudioBuffer(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 sizeInFrames, int usage);
void UnloadAudioBuffer(AudioBuffer *buffer);

bool IsAudioBufferPlaying(AudioBuffer *buffer);
void PlayAudioBuffer(AudioBuffer *buffer);
void StopAudioBuffer(AudioBuffer *buffer);
void PauseAudioBuffer(AudioBuffer *buffer);
void ResumeAudioBuffer(AudioBuffer *buffer);
void SetAudioBufferVolume(AudioBuffer *buffer, float volume);
void SetAudioBufferPitch(AudioBuffer *buffer, float pitch);
void TrackAudioBuffer(AudioBuffer *buffer);
void UntrackAudioBuffer(AudioBuffer *buffer);
int GetAudioStreamBufferSizeDefault();

//----------------------------------------------------------------------------------
// Module Functions Definition - Audio Device initialization and Closing
//----------------------------------------------------------------------------------

// Initialize audio device
void InitAudioDevice(void)
{
    // TODO: Load AUDIO context memory dynamically?

    // Init audio context
    ma_context_config ctxConfig = ma_context_config_init();
    ctxConfig.logCallback = OnLog;

    ma_result result = ma_context_init(NULL, 0, &ctxConfig, &AUDIO.System.context);
    if (result != MA_SUCCESS)
    {
        TRACELOG(LOG_WARNING, "AUDIO: Failed to initialize context");
        return;
    }

    // Init audio device
    // NOTE: Using the default device. Format is floating point because it simplifies mixing.
    ma_device_config config = ma_device_config_init(ma_device_type_playback);
    config.playback.pDeviceID = NULL;   // NULL for the default playback AUDIO.System.device.
    config.playback.format = AUDIO_DEVICE_FORMAT;
    config.playback.channels = AUDIO_DEVICE_CHANNELS;
    config.capture.pDeviceID = NULL;    // NULL for the default capture AUDIO.System.device.
    config.capture.format = ma_format_s16;
    config.capture.channels = 1;
    config.sampleRate = AUDIO_DEVICE_SAMPLE_RATE;
    config.dataCallback = OnSendAudioDataToDevice;
    config.pUserData = NULL;

    result = ma_device_init(&AUDIO.System.context, &config, &AUDIO.System.device);
    if (result != MA_SUCCESS)
    {
        TRACELOG(LOG_WARNING, "AUDIO: Failed to initialize playback device");
        ma_context_uninit(&AUDIO.System.context);
        return;
    }

    // Keep the device running the whole time. May want to consider doing something a bit smarter and only have the device running
    // while there's at least one sound being played.
    result = ma_device_start(&AUDIO.System.device);
    if (result != MA_SUCCESS)
    {
        TRACELOG(LOG_WARNING, "AUDIO: Failed to start playback device");
        ma_device_uninit(&AUDIO.System.device);
        ma_context_uninit(&AUDIO.System.context);
        return;
    }

    // Mixing happens on a separate thread, which means we need to synchronize. I'm using a mutex here to make things simple, but may
    // want to look at something a bit smarter later on to keep everything real-time, if that's necessary.
    if (ma_mutex_init(&AUDIO.System.lock) != MA_SUCCESS)
    {
        TRACELOG(LOG_WARNING, "AUDIO: Failed to create mutex for mixing");
        ma_device_uninit(&AUDIO.System.device);
        ma_context_uninit(&AUDIO.System.context);
        return;
    }

    // Init dummy audio buffers pool for multichannel sound playing
    for (int i = 0; i < MAX_AUDIO_BUFFER_POOL_CHANNELS; i++)
    {
        // WARNING: An empty audio buffer is created (data = 0)
        // AudioBuffer data just points to loaded sound data
        AUDIO.MultiChannel.pool[i] = LoadAudioBuffer(AUDIO_DEVICE_FORMAT, AUDIO_DEVICE_CHANNELS, AUDIO.System.device.sampleRate, 0, AUDIO_BUFFER_USAGE_STATIC);
    }

    TRACELOG(LOG_INFO, "AUDIO: Device initialized successfully");
    TRACELOG(LOG_INFO, "    > Backend:       miniaudio / %s", ma_get_backend_name(AUDIO.System.context.backend));
    TRACELOG(LOG_INFO, "    > Format:        %s -> %s", ma_get_format_name(AUDIO.System.device.playback.format), ma_get_format_name(AUDIO.System.device.playback.internalFormat));
    TRACELOG(LOG_INFO, "    > Channels:      %d -> %d", AUDIO.System.device.playback.channels, AUDIO.System.device.playback.internalChannels);
    TRACELOG(LOG_INFO, "    > Sample rate:   %d -> %d", AUDIO.System.device.sampleRate, AUDIO.System.device.playback.internalSampleRate);
    TRACELOG(LOG_INFO, "    > Periods size:  %d", AUDIO.System.device.playback.internalPeriodSizeInFrames*AUDIO.System.device.playback.internalPeriods);

    AUDIO.System.isReady = true;
}

// Close the audio device for all contexts
void CloseAudioDevice(void)
{
    if (AUDIO.System.isReady)
    {
        // Unload dummy audio buffers pool
        // WARNING: They can be pointing to already unloaded data
        for (int i = 0; i < MAX_AUDIO_BUFFER_POOL_CHANNELS; i++)
        {
            //UnloadAudioBuffer(AUDIO.MultiChannel.pool[i]);
            if (AUDIO.MultiChannel.pool[i] != NULL)
            {
                ma_data_converter_uninit(&AUDIO.MultiChannel.pool[i]->converter);
                UntrackAudioBuffer(AUDIO.MultiChannel.pool[i]);
                //RL_FREE(buffer->data);    // Already unloaded by UnloadSound()
                RL_FREE(AUDIO.MultiChannel.pool[i]);
            }
        }

        ma_mutex_uninit(&AUDIO.System.lock);
        ma_device_uninit(&AUDIO.System.device);
        ma_context_uninit(&AUDIO.System.context);

        AUDIO.System.isReady = false;

        TRACELOG(LOG_INFO, "AUDIO: Device closed successfully");
    }
    else TRACELOG(LOG_WARNING, "AUDIO: Device could not be closed, not currently initialized");
}

// Check if device has been initialized successfully
bool IsAudioDeviceReady(void)
{
    return AUDIO.System.isReady;
}

// Set master volume (listener)
void SetMasterVolume(float volume)
{
    ma_device_set_master_volume(&AUDIO.System.device, volume);
}

//----------------------------------------------------------------------------------
// Module Functions Definition - Audio Buffer management
//----------------------------------------------------------------------------------

// Initialize a new audio buffer (filled with silence)
AudioBuffer *LoadAudioBuffer(ma_format format, ma_uint32 channels, ma_uint32 sampleRate, ma_uint32 sizeInFrames, int usage)
{
    AudioBuffer *audioBuffer = (AudioBuffer *)RL_CALLOC(1, sizeof(AudioBuffer));

    if (audioBuffer == NULL)
    {
        TRACELOG(LOG_WARNING, "AUDIO: Failed to allocate memory for buffer");
        return NULL;
    }

    if (sizeInFrames > 0) audioBuffer->data = RL_CALLOC(sizeInFrames*channels*ma_get_bytes_per_sample(format), 1);

    // Audio data runs through a format converter
    ma_data_converter_config converterConfig = ma_data_converter_config_init(format, AUDIO_DEVICE_FORMAT, channels, AUDIO_DEVICE_CHANNELS, sampleRate, AUDIO.System.device.sampleRate);
    converterConfig.resampling.allowDynamicSampleRate = true;      // Required for pitch shifting

    ma_result result = ma_data_converter_init(&converterConfig, &audioBuffer->converter);
    if (result != MA_SUCCESS)
    {
        TRACELOG(LOG_WARNING, "AUDIO: Failed to create data conversion pipeline");
        RL_FREE(audioBuffer);
        return NULL;
    }

    // Init audio buffer values
    audioBuffer->volume = 1.0f;
    audioBuffer->pitch = 1.0f;
    audioBuffer->playing = false;
    audioBuffer->paused = false;
    audioBuffer->looping = false;
    audioBuffer->usage = usage;
    audioBuffer->frameCursorPos = 0;
    audioBuffer->sizeInFrames = sizeInFrames;

    // Buffers should be marked as processed by default so that a call to
    // UpdateAudioStream() immediately after initialization works correctly
    audioBuffer->isSubBufferProcessed[0] = true;
    audioBuffer->isSubBufferProcessed[1] = true;

    // Track audio buffer to linked list next position
    TrackAudioBuffer(audioBuffer);

    return audioBuffer;
}

// Delete an audio buffer
void UnloadAudioBuffer(AudioBuffer *buffer)
{
    if (buffer != NULL)
    {
        ma_data_converter_uninit(&buffer->converter);
        UntrackAudioBuffer(buffer);
        RL_FREE(buffer->data);
        RL_FREE(buffer);
    }
}

// Check if an audio buffer is playing
bool IsAudioBufferPlaying(AudioBuffer *buffer)
{
    bool result = false;

    if (buffer != NULL) result = (buffer->playing && !buffer->paused);

    return result;
}

// Play an audio buffer
// NOTE: Buffer is restarted to the start.
// Use PauseAudioBuffer() and ResumeAudioBuffer() if the playback position should be maintained.
void PlayAudioBuffer(AudioBuffer *buffer)
{
    if (buffer != NULL)
    {
        buffer->playing = true;
        buffer->paused = false;
        buffer->frameCursorPos = 0;
    }
}

// Stop an audio buffer
void StopAudioBuffer(AudioBuffer *buffer)
{
    if (buffer != NULL)
    {
        if (IsAudioBufferPlaying(buffer))
        {
            buffer->playing = false;
            buffer->paused = false;
            buffer->frameCursorPos = 0;
            buffer->totalFramesProcessed = 0;
            buffer->isSubBufferProcessed[0] = true;
            buffer->isSubBufferProcessed[1] = true;
        }
    }
}

// Pause an audio buffer
void PauseAudioBuffer(AudioBuffer *buffer)
{
    if (buffer != NULL) buffer->paused = true;
}

// Resume an audio buffer
void ResumeAudioBuffer(AudioBuffer *buffer)
{
    if (buffer != NULL) buffer->paused = false;
}

// Set volume for an audio buffer
void SetAudioBufferVolume(AudioBuffer *buffer, float volume)
{
    if (buffer != NULL) buffer->volume = volume;
}

// Set pitch for an audio buffer
void SetAudioBufferPitch(AudioBuffer *buffer, float pitch)
{
    if ((buffer != NULL) && (pitch > 0.0f))
    {
        // Pitching is just an adjustment of the sample rate.
        // Note that this changes the duration of the sound:
        //  - higher pitches will make the sound faster
        //  - lower pitches make it slower
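        // Illustrative example (assuming a 48000 Hz converter output rate): pitch = 2.0f halves the
        // output rate to 24000 Hz, so source frames are consumed twice as fast (faster and higher);
        // pitch = 0.5f doubles it to 96000 Hz, playing slower and lower.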
        ma_uint32 outputSampleRate = (ma_uint32)((float)buffer->converter.config.sampleRateOut/pitch);
        ma_data_converter_set_rate(&buffer->converter, buffer->converter.config.sampleRateIn, outputSampleRate);

        buffer->pitch = pitch;
    }
}

// Track audio buffer to linked list next position
void TrackAudioBuffer(AudioBuffer *buffer)
{
    ma_mutex_lock(&AUDIO.System.lock);
    {
        if (AUDIO.Buffer.first == NULL) AUDIO.Buffer.first = buffer;
        else
        {
            AUDIO.Buffer.last->next = buffer;
            buffer->prev = AUDIO.Buffer.last;
        }

        AUDIO.Buffer.last = buffer;
    }
    ma_mutex_unlock(&AUDIO.System.lock);
}

// Untrack audio buffer from linked list
void UntrackAudioBuffer(AudioBuffer *buffer)
{
    ma_mutex_lock(&AUDIO.System.lock);
    {
        if (buffer->prev == NULL) AUDIO.Buffer.first = buffer->next;
        else buffer->prev->next = buffer->next;

        if (buffer->next == NULL) AUDIO.Buffer.last = buffer->prev;
        else buffer->next->prev = buffer->prev;

        buffer->prev = NULL;
        buffer->next = NULL;
    }
    ma_mutex_unlock(&AUDIO.System.lock);
}

//----------------------------------------------------------------------------------
// Module Functions Definition - Sounds loading and playing (.WAV)
//----------------------------------------------------------------------------------

// Load wave data from file
Wave LoadWave(const char *fileName)
{
    Wave wave = { 0 };

    // Loading file to memory
    unsigned int fileSize = 0;
    unsigned char *fileData = LoadFileData(fileName, &fileSize);

    if (fileData != NULL)
    {
        // Loading wave from memory data
        wave = LoadWaveFromMemory(GetFileExtension(fileName), fileData, fileSize);

        RL_FREE(fileData);
    }

    return wave;
}

// Load wave from memory buffer, fileType refers to extension: i.e. ".wav"
Wave LoadWaveFromMemory(const char *fileType, const unsigned char *fileData, int dataSize)
{
    Wave wave = { 0 };

    char fileExtLower[16] = { 0 };
    strcpy(fileExtLower, TextToLower(fileType));

    if (false) { }
#if defined(SUPPORT_FILEFORMAT_WAV)
    else if (TextIsEqual(fileExtLower, ".wav")) wave = LoadWAV(fileData, dataSize);
#endif
#if defined(SUPPORT_FILEFORMAT_OGG)
    else if (TextIsEqual(fileExtLower, ".ogg")) wave = LoadOGG(fileData, dataSize);
#endif
#if defined(SUPPORT_FILEFORMAT_FLAC)
    else if (TextIsEqual(fileExtLower, ".flac")) wave = LoadFLAC(fileData, dataSize);
#endif
#if defined(SUPPORT_FILEFORMAT_MP3)
    else if (TextIsEqual(fileExtLower, ".mp3")) wave = LoadMP3(fileData, dataSize);
#endif
    else TRACELOG(LOG_WARNING, "WAVE: File format not supported");

    return wave;
}

// Load sound from file
// NOTE: The entire file is loaded to memory to be played (no-streaming)
Sound LoadSound(const char *fileName)
{
    Wave wave = LoadWave(fileName);

    Sound sound = LoadSoundFromWave(wave);

    UnloadWave(wave);       // Sound is loaded, we can unload wave

    return sound;
}

// Load sound from wave data
// NOTE: Wave data must be unallocated manually
Sound LoadSoundFromWave(Wave wave)
{
    Sound sound = { 0 };

    if (wave.data != NULL)
    {
        // When using miniaudio we need to do our own mixing.
        // To simplify this we need to convert the format of each sound to be consistent with
        // the format used to open the playback AUDIO.System.device. We can do this two ways:
        //
        //   1) Convert the whole sound in one go at load time (here).
        //   2) Convert the audio data in chunks at mixing time.
        //
        // First option has been selected, format conversion is done on the loading stage.
        // The downside is that it uses more memory if the original sound is u8 or s16.
        ma_format formatIn = ((wave.sampleSize == 8)? ma_format_u8 : ((wave.sampleSize == 16)? ma_format_s16 : ma_format_f32));
        ma_uint32 frameCountIn = wave.sampleCount/wave.channels;

        ma_uint32 frameCount = (ma_uint32)ma_convert_frames(NULL, 0, AUDIO_DEVICE_FORMAT, AUDIO_DEVICE_CHANNELS, AUDIO.System.device.sampleRate, NULL, frameCountIn, formatIn, wave.channels, wave.sampleRate);
        if (frameCount == 0) TRACELOG(LOG_WARNING, "SOUND: Failed to get frame count for format conversion");

        AudioBuffer *audioBuffer = LoadAudioBuffer(AUDIO_DEVICE_FORMAT, AUDIO_DEVICE_CHANNELS, AUDIO.System.device.sampleRate, frameCount, AUDIO_BUFFER_USAGE_STATIC);
        if (audioBuffer == NULL)
        {
            TRACELOG(LOG_WARNING, "SOUND: Failed to create buffer");
            return sound;   // Early return to avoid dereferencing the audioBuffer null pointer
        }

        frameCount = (ma_uint32)ma_convert_frames(audioBuffer->data, frameCount, AUDIO_DEVICE_FORMAT, AUDIO_DEVICE_CHANNELS, AUDIO.System.device.sampleRate, wave.data, frameCountIn, formatIn, wave.channels, wave.sampleRate);
        if (frameCount == 0) TRACELOG(LOG_WARNING, "SOUND: Failed format conversion");

        sound.sampleCount = frameCount*AUDIO_DEVICE_CHANNELS;
        sound.stream.sampleRate = AUDIO.System.device.sampleRate;
        sound.stream.sampleSize = 32;
        sound.stream.channels = AUDIO_DEVICE_CHANNELS;
        sound.stream.buffer = audioBuffer;
    }

    return sound;
}

// Unload wave data
void UnloadWave(Wave wave)
{
    if (wave.data != NULL) RL_FREE(wave.data);

    TRACELOG(LOG_INFO, "WAVE: Unloaded wave data from RAM");
}

// Unload sound
void UnloadSound(Sound sound)
{
    UnloadAudioBuffer(sound.stream.buffer);

    TRACELOG(LOG_INFO, "WAVE: Unloaded sound data from RAM");
}

// Update sound buffer with new data
void UpdateSound(Sound sound, const void *data, int samplesCount)
{
    if (sound.stream.buffer != NULL)
    {
        StopAudioBuffer(sound.stream.buffer);

        // TODO: May want to lock/unlock this since this data buffer is read at mixing time
        memcpy(sound.stream.buffer->data, data, samplesCount*ma_get_bytes_per_frame(sound.stream.buffer->converter.config.formatIn, sound.stream.buffer->converter.config.channelsIn));
    }
}

// Export wave data to file
bool ExportWave(Wave wave, const char *fileName)
{
    bool success = false;

    if (false) { }
#if defined(SUPPORT_FILEFORMAT_WAV)
    else if (IsFileExtension(fileName, ".wav")) success = SaveWAV(wave, fileName);
#endif
    else if (IsFileExtension(fileName, ".raw"))
    {
        // Export raw sample data (without header)
        // NOTE: It's up to the user to track wave parameters
        success = SaveFileData(fileName, wave.data, wave.sampleCount*wave.sampleSize/8);
    }

    if (success) TRACELOG(LOG_INFO, "FILEIO: [%s] Wave data exported successfully", fileName);
    else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to export wave data", fileName);

    return success;
}

// Export wave sample data to code (.h)
bool ExportWaveAsCode(Wave wave, const char *fileName)
{
    bool success = false;

#ifndef TEXT_BYTES_PER_LINE
    #define TEXT_BYTES_PER_LINE     20
#endif

    int waveDataSize = wave.sampleCount*wave.channels*wave.sampleSize/8;

    // NOTE: Text data buffer size is estimated considering wave data size in bytes
    // and requiring 6 char bytes for every byte: "0x00, "
    char *txtData = (char *)RL_CALLOC(6*waveDataSize + 2000, sizeof(char));

    int bytesCount = 0;
    bytesCount += sprintf(txtData + bytesCount, "\n//////////////////////////////////////////////////////////////////////////////////\n");
    bytesCount += sprintf(txtData + bytesCount, "//                                                                              //\n");
    bytesCount += sprintf(txtData + bytesCount, "// WaveAsCode exporter v1.0 - Wave data exported as an array of bytes          //\n");
    bytesCount += sprintf(txtData + bytesCount, "//                                                                              //\n");
    bytesCount += sprintf(txtData + bytesCount, "// more info and bugs-report: github.com/raysan5/raylib                        //\n");
    bytesCount += sprintf(txtData + bytesCount, "// feedback and support:      ray[at]raylib.com                                //\n");
    bytesCount += sprintf(txtData + bytesCount, "//                                                                              //\n");
    bytesCount += sprintf(txtData + bytesCount, "// Copyright (c) 2018 Ramon Santamaria (@raysan5)                               //\n");
    bytesCount += sprintf(txtData + bytesCount, "//                                                                              //\n");
    bytesCount += sprintf(txtData + bytesCount, "//////////////////////////////////////////////////////////////////////////////////\n\n");

    char varFileName[256] = { 0 };
#if !defined(RAUDIO_STANDALONE)
    // Get file name from path and convert variable name to uppercase
    strcpy(varFileName, GetFileNameWithoutExt(fileName));
    for (int i = 0; varFileName[i] != '\0'; i++) if (varFileName[i] >= 'a' && varFileName[i] <= 'z') { varFileName[i] = varFileName[i] - 32; }
#else
    strcpy(varFileName, fileName);
#endif

    bytesCount += sprintf(txtData + bytesCount, "// Wave data information\n");
    bytesCount += sprintf(txtData + bytesCount, "#define %s_SAMPLE_COUNT     %u\n", varFileName, wave.sampleCount);
    bytesCount += sprintf(txtData + bytesCount, "#define %s_SAMPLE_RATE      %u\n", varFileName, wave.sampleRate);
    bytesCount += sprintf(txtData + bytesCount, "#define %s_SAMPLE_SIZE      %u\n", varFileName, wave.sampleSize);
    bytesCount += sprintf(txtData + bytesCount, "#define %s_CHANNELS         %u\n\n", varFileName, wave.channels);

    // Write byte data as hexadecimal text
    bytesCount += sprintf(txtData + bytesCount, "static unsigned char %s_DATA[%i] = { ", varFileName, waveDataSize);
    for (int i = 0; i < waveDataSize - 1; i++) bytesCount += sprintf(txtData + bytesCount, ((i%TEXT_BYTES_PER_LINE == 0)? "0x%x,\n" : "0x%x, "), ((unsigned char *)wave.data)[i]);
    bytesCount += sprintf(txtData + bytesCount, "0x%x };\n", ((unsigned char *)wave.data)[waveDataSize - 1]);

    // NOTE: Text data length exported is determined by '\0' (NULL) character
    success = SaveFileText(fileName, txtData);

    RL_FREE(txtData);

    return success;
}

// Play a sound
void PlaySound(Sound sound)
{
    PlayAudioBuffer(sound.stream.buffer);
}

// Play a sound in the multichannel buffer pool
void PlaySoundMulti(Sound sound)
{
    int index = -1;
    unsigned int oldAge = 0;
    int oldIndex = -1;

    // Find the first non-playing pool entry
    for (int i = 0; i < MAX_AUDIO_BUFFER_POOL_CHANNELS; i++)
    {
        if (AUDIO.MultiChannel.channels[i] > oldAge)
        {
            oldAge = AUDIO.MultiChannel.channels[i];
            oldIndex = i;
        }

        if (!IsAudioBufferPlaying(AUDIO.MultiChannel.pool[i]))
        {
            index = i;
            break;
        }
    }

    // If no non-playing pool members are available, choose the oldest one
    if (index == -1)
    {
        TRACELOG(LOG_WARNING, "SOUND: Buffer pool is already full, count: %i", AUDIO.MultiChannel.poolCounter);

        if (oldIndex == -1)
        {
            // Shouldn't be able to get here... but just in case something odd happens!
            TRACELOG(LOG_WARNING, "SOUND: Buffer pool could not determine oldest buffer not playing sound");
            return;
        }

        index = oldIndex;

        // Just in case...
        StopAudioBuffer(AUDIO.MultiChannel.pool[index]);
    }

    // Experimentally, a mutex lock doesn't seem to be needed here; this makes sense
    // since pool[index] isn't playing and the only data we're copying
    // shouldn't be changing...
    AUDIO.MultiChannel.channels[index] = AUDIO.MultiChannel.poolCounter;
    AUDIO.MultiChannel.poolCounter++;

    AUDIO.MultiChannel.pool[index]->volume = sound.stream.buffer->volume;
    AUDIO.MultiChannel.pool[index]->pitch = sound.stream.buffer->pitch;
    AUDIO.MultiChannel.pool[index]->looping = sound.stream.buffer->looping;
    AUDIO.MultiChannel.pool[index]->usage = sound.stream.buffer->usage;
    AUDIO.MultiChannel.pool[index]->isSubBufferProcessed[0] = false;
    AUDIO.MultiChannel.pool[index]->isSubBufferProcessed[1] = false;
    AUDIO.MultiChannel.pool[index]->sizeInFrames = sound.stream.buffer->sizeInFrames;
    AUDIO.MultiChannel.pool[index]->data = sound.stream.buffer->data;

    PlayAudioBuffer(AUDIO.MultiChannel.pool[index]);
}

// Stop any sound played with PlaySoundMulti()
void StopSoundMulti(void)
{
    for (int i = 0; i < MAX_AUDIO_BUFFER_POOL_CHANNELS; i++) StopAudioBuffer(AUDIO.MultiChannel.pool[i]);
}

// Get number of sounds playing in the multichannel buffer pool
int GetSoundsPlaying(void)
{
    int counter = 0;

    for (int i = 0; i < MAX_AUDIO_BUFFER_POOL_CHANNELS; i++)
    {
        if (IsAudioBufferPlaying(AUDIO.MultiChannel.pool[i])) counter++;
    }

    return counter;
}

// Pause a sound
void PauseSound(Sound sound)
{
    PauseAudioBuffer(sound.stream.buffer);
}

// Resume a paused sound
void ResumeSound(Sound sound)
{
    ResumeAudioBuffer(sound.stream.buffer);
}

// Stop reproducing a sound
void StopSound(Sound sound)
{
    StopAudioBuffer(sound.stream.buffer);
}

// Check if a sound is playing
bool IsSoundPlaying(Sound sound)
{
    return IsAudioBufferPlaying(sound.stream.buffer);
}

// Set volume for a sound
void SetSoundVolume(Sound sound, float volume)
{
    SetAudioBufferVolume(sound.stream.buffer, volume);
}

// Set pitch for a sound
void SetSoundPitch(Sound sound, float pitch)
{
    SetAudioBufferPitch(sound.stream.buffer, pitch);
}

// Convert wave data to desired format
void WaveFormat(Wave *wave, int sampleRate, int sampleSize, int channels)
{
    ma_format formatIn = ((wave->sampleSize == 8)? ma_format_u8 : ((wave->sampleSize == 16)? ma_format_s16 : ma_format_f32));
    ma_format formatOut = ((sampleSize == 8)? ma_format_u8 : ((sampleSize == 16)? ma_format_s16 : ma_format_f32));

    ma_uint32 frameCountIn = wave->sampleCount/wave->channels;

    ma_uint32 frameCount = (ma_uint32)ma_convert_frames(NULL, 0, formatOut, channels, sampleRate, NULL, frameCountIn, formatIn, wave->channels, wave->sampleRate);
    if (frameCount == 0)
    {
        TRACELOG(LOG_WARNING, "WAVE: Failed to get frame count for format conversion");
        return;
    }

    void *data = RL_MALLOC(frameCount*channels*(sampleSize/8));

    frameCount = (ma_uint32)ma_convert_frames(data, frameCount, formatOut, channels, sampleRate, wave->data, frameCountIn, formatIn, wave->channels, wave->sampleRate);
    if (frameCount == 0)
    {
        TRACELOG(LOG_WARNING, "WAVE: Failed format conversion");
        return;
    }

    wave->sampleCount = frameCount*channels;
    wave->sampleSize = sampleSize;
    wave->sampleRate = sampleRate;
    wave->channels = channels;

    RL_FREE(wave->data);
    wave->data = data;
}

// Copy a wave to a new wave
Wave WaveCopy ( Wave wave )
{
2016-12-25 01:58:56 +01:00
Wave newWave = { 0 } ;
2016-09-08 00:20:06 +02:00
2020-11-19 20:11:11 +01:00
newWave . data = RL_MALLOC ( wave . sampleCount * wave . sampleSize / 8 ) ;
2016-09-08 00:20:06 +02:00
if ( newWave . data ! = NULL )
{
// NOTE: Size must be provided in bytes
2020-11-19 20:11:11 +01:00
memcpy ( newWave . data , wave . data , wave . sampleCount * wave . sampleSize / 8 ) ;
2016-09-08 00:20:06 +02:00
newWave . sampleCount = wave . sampleCount ;
newWave . sampleRate = wave . sampleRate ;
newWave . sampleSize = wave . sampleSize ;
newWave . channels = wave . channels ;
}
return newWave ;
}
// Crop a wave to defined samples range
// NOTE: Security check in case of out-of-range
void WaveCrop ( Wave * wave , int initSample , int finalSample )
{
2017-01-28 23:02:30 +01:00
if ( ( initSample > = 0 ) & & ( initSample < finalSample ) & &
2018-05-21 20:46:22 +10:00
( finalSample > 0 ) & & ( ( unsigned int ) finalSample < wave - > sampleCount ) )
2016-09-08 01:03:05 +02:00
{
2016-12-25 01:58:56 +01:00
int sampleCount = finalSample - initSample ;
2017-01-28 23:02:30 +01:00
2020-11-19 20:11:11 +01:00
void * data = RL_MALLOC ( sampleCount * wave - > sampleSize / 8 ) ;
2017-01-28 23:02:30 +01:00
2020-11-19 20:11:11 +01:00
memcpy ( data , ( unsigned char * ) wave - > data + ( initSample * wave - > channels * wave - > sampleSize / 8 ) , sampleCount * wave - > sampleSize / 8 ) ;
2017-01-28 23:02:30 +01:00
2019-04-23 14:55:35 +02:00
RL_FREE ( wave - > data ) ;
2016-12-25 01:58:56 +01:00
wave - > data = data ;
2016-09-08 01:03:05 +02:00
}
2020-03-27 17:16:30 +01:00
else TRACELOG ( LOG_WARNING , " WAVE: Crop range out of bounds " ) ;
2016-09-08 00:20:06 +02:00
}
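// Example usage (sketch, not part of the library): cropping a section of a wave while
// keeping the original intact. Assumes 'wave' holds valid data; the sample range is
// illustrative.
//
//     Wave copy = WaveCopy(wave);
//     WaveCrop(&copy, 0, copy.sampleCount/2);   // Keep only the first half
//     // ... use copy ...
//     UnloadWave(copy);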
2020-12-18 21:03:08 +01:00
// Load samples data from wave as a float array
// NOTE 1: Returned sample values are normalized to range [-1..1]
// NOTE 2: Sample data allocated should be freed with UnloadWaveSamples()
float * LoadWaveSamples ( Wave wave )
2016-09-08 00:20:06 +02:00
{
2020-11-19 20:11:11 +01:00
float * samples = ( float * ) RL_MALLOC ( wave . sampleCount * sizeof ( float ) ) ;
2017-01-28 23:02:30 +01:00
2020-11-19 20:11:11 +01:00
// NOTE: sampleCount is the total number of interleaved samples (including all channels)
2020-12-23 15:03:26 +01:00
2018-05-21 20:46:22 +10:00
for ( unsigned int i = 0 ; i < wave . sampleCount ; i + + )
2016-09-08 00:20:06 +02:00
{
2021-02-14 16:37:34 +00:00
if ( wave . sampleSize = = 8 ) samples [ i ] = ( float ) ( ( ( unsigned char * ) wave . data ) [ i ] - 127 ) / 256.0f ;
else if ( wave . sampleSize = = 16 ) samples [ i ] = ( float ) ( ( ( short * ) wave . data ) [ i ] ) / 32767.0f ;
else if ( wave . sampleSize = = 32 ) samples [ i ] = ( ( float * ) wave . data ) [ i ] ;
2016-09-08 00:20:06 +02:00
}
2017-01-28 23:02:30 +01:00
2016-09-08 00:20:06 +02:00
return samples ;
}
2020-12-18 21:03:08 +01:00
// Unload samples data loaded with LoadWaveSamples()
void UnloadWaveSamples ( float * samples )
{
RL_FREE ( samples ) ;
}
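// Example usage (sketch, not part of the library): reading normalized samples to compute
// a simple peak level. Assumes 'wave' holds valid data and <math.h> for fabsf().
//
//     float *samples = LoadWaveSamples(wave);
//     float peak = 0.0f;
//     for (unsigned int i = 0; i < wave.sampleCount; i++)
//     {
//         if (fabsf(samples[i]) > peak) peak = fabsf(samples[i]);
//     }
//     UnloadWaveSamples(samples);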
2014-04-19 16:36:49 +02:00
//----------------------------------------------------------------------------------
// Module Functions Definition - Music loading and stream playing (.OGG)
//----------------------------------------------------------------------------------
2016-08-01 12:49:17 +02:00
// Load music stream from file
2016-09-08 00:20:06 +02:00
Music LoadMusicStream ( const char * fileName )
2016-07-29 21:35:57 +02:00
{
2019-07-26 10:26:39 +02:00
Music music = { 0 } ;
2019-07-24 22:37:24 +02:00
bool musicLoaded = false ;
2016-07-29 21:35:57 +02:00
2019-07-24 22:37:24 +02:00
if ( false ) { }
2020-05-23 23:19:59 +02:00
# if defined(SUPPORT_FILEFORMAT_WAV)
else if ( IsFileExtension ( fileName , " .wav " ) )
{
2021-02-14 16:37:34 +00:00
drwav * ctxWav = RL_CALLOC ( 1 , sizeof ( drwav ) ) ;
2020-05-23 23:19:59 +02:00
bool success = drwav_init_file ( ctxWav , fileName , NULL ) ;
2021-02-14 16:37:34 +00:00
music . ctxType = MUSIC_AUDIO_WAV ;
music . ctxData = ctxWav ;
2020-05-23 23:19:59 +02:00
if ( success )
{
2020-08-11 19:08:07 +02:00
int sampleSize = ctxWav - > bitsPerSample ;
if ( ctxWav - > bitsPerSample = = 24 ) sampleSize = 16 ; // Forcing conversion to s16 on UpdateMusicStream()
2020-05-23 23:19:59 +02:00
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( ctxWav - > sampleRate , sampleSize , ctxWav - > channels ) ;
2020-05-23 23:19:59 +02:00
music . sampleCount = ( unsigned int ) ctxWav - > totalPCMFrameCount * ctxWav - > channels ;
music . looping = true ; // Looping enabled by default
musicLoaded = true ;
}
}
# endif
2019-02-14 12:32:23 +02:00
# if defined(SUPPORT_FILEFORMAT_OGG)
2019-07-24 22:37:24 +02:00
else if ( IsFileExtension ( fileName , " .ogg " ) )
2016-07-29 21:35:57 +02:00
{
2016-08-01 12:49:17 +02:00
// Open ogg audio stream
2021-02-14 16:37:34 +00:00
music . ctxType = MUSIC_AUDIO_OGG ;
2019-07-26 10:26:39 +02:00
music . ctxData = stb_vorbis_open_filename ( fileName , NULL , NULL ) ;
2016-07-29 21:35:57 +02:00
2019-07-26 10:26:39 +02:00
if ( music . ctxData ! = NULL )
2014-04-19 16:36:49 +02:00
{
2019-07-26 10:26:39 +02:00
stb_vorbis_info info = stb_vorbis_get_info ( ( stb_vorbis * ) music . ctxData ) ; // Get Ogg file info
2016-07-29 21:35:57 +02:00
2016-12-25 01:58:56 +01:00
// OGG bit rate defaults to 16 bit, it's enough for compressed format
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( info . sample_rate , 16 , info . channels ) ;
2020-12-23 15:03:26 +01:00
2020-11-19 20:11:11 +01:00
// WARNING: It seems this function returns length in frames, not samples, so we multiply by channels
2019-07-26 10:26:39 +02:00
music . sampleCount = ( unsigned int ) stb_vorbis_stream_length_in_samples ( ( stb_vorbis * ) music . ctxData ) * info . channels ;
2020-05-14 14:00:37 +02:00
music . looping = true ; // Looping enabled by default
2019-07-24 22:37:24 +02:00
musicLoaded = true ;
2014-04-19 16:36:49 +02:00
}
}
2019-02-14 12:32:23 +02:00
# endif
2017-03-26 22:49:01 +02:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
2017-03-29 00:35:42 +02:00
else if ( IsFileExtension ( fileName , " .flac " ) )
2016-10-10 18:22:55 +02:00
{
2021-02-14 16:37:34 +00:00
music . ctxType = MUSIC_AUDIO_FLAC ;
2020-11-16 12:20:50 +01:00
music . ctxData = drflac_open_file ( fileName , NULL ) ;
2017-01-28 23:02:30 +01:00
2019-07-26 10:26:39 +02:00
if ( music . ctxData ! = NULL )
2016-10-10 18:22:55 +02:00
{
2019-07-26 10:26:39 +02:00
drflac * ctxFlac = ( drflac * ) music . ctxData ;
2019-07-24 22:37:24 +02:00
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( ctxFlac - > sampleRate , ctxFlac - > bitsPerSample , ctxFlac - > channels ) ;
2020-11-19 20:11:11 +01:00
music . sampleCount = ( unsigned int ) ctxFlac - > totalPCMFrameCount * ctxFlac - > channels ;
2020-05-14 14:00:37 +02:00
music . looping = true ; // Looping enabled by default
2019-07-24 22:37:24 +02:00
musicLoaded = true ;
2016-10-10 18:22:55 +02:00
}
}
2017-03-26 22:49:01 +02:00
# endif
2018-05-17 00:04:58 +02:00
# if defined(SUPPORT_FILEFORMAT_MP3)
else if ( IsFileExtension ( fileName , " .mp3 " ) )
{
2021-02-14 16:37:34 +00:00
drmp3 * ctxMp3 = RL_CALLOC ( 1 , sizeof ( drmp3 ) ) ;
2019-07-23 22:21:01 +02:00
int result = drmp3_init_file ( ctxMp3 , fileName , NULL ) ;
2018-05-17 00:04:58 +02:00
2021-02-14 16:37:34 +00:00
music . ctxType = MUSIC_AUDIO_MP3 ;
music . ctxData = ctxMp3 ;
2019-07-24 22:37:24 +02:00
if ( result > 0 )
2018-05-17 00:04:58 +02:00
{
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( ctxMp3 - > sampleRate , 32 , ctxMp3 - > channels ) ;
2020-02-04 22:43:31 +10:00
music . sampleCount = ( unsigned int ) drmp3_get_pcm_frame_count ( ctxMp3 ) * ctxMp3 - > channels ;
2020-05-14 14:00:37 +02:00
music . looping = true ; // Looping enabled by default
2019-07-24 22:37:24 +02:00
musicLoaded = true ;
2018-05-17 00:04:58 +02:00
}
}
# endif
2017-03-26 22:49:01 +02:00
# if defined(SUPPORT_FILEFORMAT_XM)
2017-03-29 00:35:42 +02:00
else if ( IsFileExtension ( fileName , " .xm " ) )
2016-04-24 18:18:18 -07:00
{
2019-07-23 22:21:01 +02:00
jar_xm_context_t * ctxXm = NULL ;
2021-03-21 14:07:55 -07:00
int result = jar_xm_create_context_from_file ( & ctxXm , AUDIO . System . device . sampleRate , fileName ) ;
2016-08-16 11:09:55 +02:00
2021-02-14 16:37:34 +00:00
music . ctxType = MUSIC_MODULE_XM ;
music . ctxData = ctxXm ;
2020-02-03 18:31:30 +01:00
if ( result = = 0 ) // XM AUDIO.System.context created successfully
2016-04-24 18:18:18 -07:00
{
2019-07-24 22:37:24 +02:00
jar_xm_set_max_loop_count ( ctxXm , 0 ) ; // Set infinite number of loops
2016-08-01 12:49:17 +02:00
2021-04-09 02:00:21 -07:00
unsigned int bits = 32 ;
if ( AUDIO_DEVICE_FORMAT = = ma_format_s16 )
bits = 16 ;
else if ( AUDIO_DEVICE_FORMAT = = ma_format_u8 )
bits = 8 ;
2021-04-18 23:50:32 +02:00
2016-08-01 12:49:17 +02:00
// NOTE: Only stereo is supported for XM
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( AUDIO . System . device . sampleRate , bits , AUDIO_DEVICE_CHANNELS ) ;
2020-11-19 20:11:11 +01:00
music . sampleCount = ( unsigned int ) jar_xm_get_remaining_samples ( ctxXm ) * 2 ; // 2 channels
2020-05-14 14:00:37 +02:00
music . looping = true ; // Looping enabled by default
2019-12-21 04:02:54 -08:00
jar_xm_reset ( ctxXm ) ; // make sure we start at the beginning of the song
2019-07-24 22:37:24 +02:00
musicLoaded = true ;
2016-05-11 20:15:37 -07:00
}
}
2017-03-26 22:49:01 +02:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2017-03-29 00:35:42 +02:00
else if ( IsFileExtension ( fileName , " .mod " ) )
2016-06-01 20:09:00 -07:00
{
2021-02-14 16:37:34 +00:00
jar_mod_context_t * ctxMod = RL_CALLOC ( 1 , sizeof ( jar_mod_context_t ) ) ;
2019-07-23 22:21:01 +02:00
jar_mod_init ( ctxMod ) ;
2019-07-24 22:37:24 +02:00
int result = jar_mod_load_file ( ctxMod , fileName ) ;
2016-07-29 21:35:57 +02:00
2021-02-14 16:37:34 +00:00
music . ctxType = MUSIC_MODULE_MOD ;
music . ctxData = ctxMod ;
2019-07-24 22:37:24 +02:00
if ( result > 0 )
2016-06-01 20:09:00 -07:00
{
2018-12-25 15:18:35 +01:00
// NOTE: Only stereo is supported for MOD
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( AUDIO . System . device . sampleRate , 16 , AUDIO_DEVICE_CHANNELS ) ;
2020-11-19 20:11:11 +01:00
music . sampleCount = ( unsigned int ) jar_mod_max_samples ( ctxMod ) * 2 ; // 2 channels
2020-05-14 14:00:37 +02:00
music . looping = true ; // Looping enabled by default
2019-07-24 22:37:24 +02:00
musicLoaded = true ;
2016-06-01 20:09:00 -07:00
}
}
2017-03-26 22:49:01 +02:00
# endif
2020-04-06 19:26:09 +10:00
else TRACELOG ( LOG_WARNING , " STREAM: [%s] Fileformat not supported " , fileName ) ;
2020-11-03 23:47:33 +01:00
2018-07-28 18:07:06 +02:00
if ( ! musicLoaded )
{
2019-07-24 22:37:24 +02:00
if ( false ) { }
2020-05-23 23:19:59 +02:00
# if defined(SUPPORT_FILEFORMAT_WAV)
else if ( music . ctxType = = MUSIC_AUDIO_WAV ) drwav_uninit ( ( drwav * ) music . ctxData ) ;
# endif
2019-02-14 12:32:23 +02:00
# if defined(SUPPORT_FILEFORMAT_OGG)
2019-07-26 10:26:39 +02:00
else if ( music . ctxType = = MUSIC_AUDIO_OGG ) stb_vorbis_close ( ( stb_vorbis * ) music . ctxData ) ;
2019-02-14 12:32:23 +02:00
# endif
2018-07-28 18:07:06 +02:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
2020-11-16 12:20:50 +01:00
else if ( music . ctxType = = MUSIC_AUDIO_FLAC ) drflac_free ( ( drflac * ) music . ctxData , NULL ) ;
2018-07-28 18:07:06 +02:00
# endif
# if defined(SUPPORT_FILEFORMAT_MP3)
2019-07-26 10:26:39 +02:00
else if ( music . ctxType = = MUSIC_AUDIO_MP3 ) { drmp3_uninit ( ( drmp3 * ) music . ctxData ) ; RL_FREE ( music . ctxData ) ; }
2018-07-28 18:07:06 +02:00
# endif
# if defined(SUPPORT_FILEFORMAT_XM)
2019-07-26 10:26:39 +02:00
else if ( music . ctxType = = MUSIC_MODULE_XM ) jar_xm_free_context ( ( jar_xm_context_t * ) music . ctxData ) ;
2018-07-28 18:07:06 +02:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2019-07-26 10:26:39 +02:00
else if ( music . ctxType = = MUSIC_MODULE_MOD ) { jar_mod_unload ( ( jar_mod_context_t * ) music . ctxData ) ; RL_FREE ( music . ctxData ) ; }
2018-07-28 18:07:06 +02:00
# endif
2021-02-14 16:37:34 +00:00
music . ctxData = NULL ;
2020-03-27 17:16:30 +01:00
TRACELOG ( LOG_WARNING , " FILEIO: [%s] Music file could not be opened " , fileName ) ;
2018-07-28 18:07:06 +02:00
}
2019-09-03 23:08:02 +02:00
else
{
// Show some music stream info
2020-03-27 17:16:30 +01:00
TRACELOG ( LOG_INFO , " FILEIO: [%s] Music file successfully loaded: " , fileName ) ;
TRACELOG ( LOG_INFO , " > Total samples: %i " , music . sampleCount ) ;
2020-03-27 18:49:21 +01:00
TRACELOG ( LOG_INFO , " > Sample rate: %i Hz " , music . stream . sampleRate ) ;
TRACELOG ( LOG_INFO , " > Sample size: %i bits " , music . stream . sampleSize ) ;
TRACELOG ( LOG_INFO , " > Channels: %i (%s) " , music . stream . channels , ( music . stream . channels = = 1 ) ? " Mono " : ( music . stream . channels = = 2 ) ? " Stereo " : " Multi " ) ;
2019-09-03 23:08:02 +02:00
}
2016-07-29 21:35:57 +02:00
2016-08-01 12:49:17 +02:00
return music ;
2014-04-19 16:36:49 +02:00
}
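// Example usage (sketch, not part of the library): loading a music file and starting
// playback. Assumes InitAudioDevice() was already called (defined in this module);
// the file path is illustrative. UpdateMusicStream() must be called regularly, see below.
//
//     Music music = LoadMusicStream("resources/ambient.ogg");
//     PlayMusicStream(music);
//     // ... call UpdateMusicStream(music) every frame while playing ...
//     UnloadMusicStream(music);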
2021-02-22 20:45:52 +02:00
// Load music stream from memory buffer (fileType refers to extension, including the leading period, e.g. ".mod")
Music LoadMusicStreamFromMemory ( const char * fileType , unsigned char * data , int dataSize )
{
Music music = { 0 } ;
bool musicLoaded = false ;
2021-03-19 19:43:44 +01:00
2021-02-22 20:45:52 +02:00
char fileExtLower [ 16 ] = { 0 } ;
strcpy ( fileExtLower , TextToLower ( fileType ) ) ;
2021-03-19 19:43:44 +01:00
2021-02-22 20:45:52 +02:00
if ( false ) { }
2021-02-24 10:22:21 +02:00
# if defined(SUPPORT_FILEFORMAT_WAV)
else if ( TextIsEqual ( fileExtLower , " .wav " ) )
{
drwav * ctxWav = RL_CALLOC ( 1 , sizeof ( drwav ) ) ;
2021-04-01 20:24:33 +02:00
2021-02-24 10:22:21 +02:00
bool success = drwav_init_memory ( ctxWav , ( const void * ) data , dataSize , NULL ) ;
2021-03-19 19:43:44 +01:00
2021-02-24 10:22:21 +02:00
music . ctxType = MUSIC_AUDIO_WAV ;
music . ctxData = ctxWav ;
2021-03-19 19:43:44 +01:00
2021-02-24 10:22:21 +02:00
if ( success )
{
int sampleSize = ctxWav - > bitsPerSample ;
if ( ctxWav - > bitsPerSample = = 24 ) sampleSize = 16 ; // Forcing conversion to s16 on UpdateMusicStream()
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( ctxWav - > sampleRate , sampleSize , ctxWav - > channels ) ;
2021-02-24 10:22:21 +02:00
music . sampleCount = ( unsigned int ) ctxWav - > totalPCMFrameCount * ctxWav - > channels ;
music . looping = true ; // Looping enabled by default
musicLoaded = true ;
}
}
# endif
# if defined(SUPPORT_FILEFORMAT_FLAC)
else if ( TextIsEqual ( fileExtLower , " .flac " ) )
{
music . ctxType = MUSIC_AUDIO_FLAC ;
music . ctxData = drflac_open_memory ( ( const void * ) data , dataSize , NULL ) ;
if ( music . ctxData ! = NULL )
{
drflac * ctxFlac = ( drflac * ) music . ctxData ;
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( ctxFlac - > sampleRate , ctxFlac - > bitsPerSample , ctxFlac - > channels ) ;
2021-02-24 10:22:21 +02:00
music . sampleCount = ( unsigned int ) ctxFlac - > totalPCMFrameCount * ctxFlac - > channels ;
music . looping = true ; // Looping enabled by default
musicLoaded = true ;
}
}
# endif
# if defined(SUPPORT_FILEFORMAT_MP3)
else if ( TextIsEqual ( fileExtLower , " .mp3 " ) )
{
drmp3 * ctxMp3 = RL_CALLOC ( 1 , sizeof ( drmp3 ) ) ;
int success = drmp3_init_memory ( ctxMp3 , ( const void * ) data , dataSize , NULL ) ;
music . ctxType = MUSIC_AUDIO_MP3 ;
music . ctxData = ctxMp3 ;
if ( success )
{
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( ctxMp3 - > sampleRate , 32 , ctxMp3 - > channels ) ;
2021-02-24 10:22:21 +02:00
music . sampleCount = ( unsigned int ) drmp3_get_pcm_frame_count ( ctxMp3 ) * ctxMp3 - > channels ;
music . looping = true ; // Looping enabled by default
musicLoaded = true ;
}
}
# endif
2021-02-25 12:55:08 +02:00
# if defined(SUPPORT_FILEFORMAT_OGG)
else if ( TextIsEqual ( fileExtLower , " .ogg " ) )
{
// Open ogg audio stream
music . ctxType = MUSIC_AUDIO_OGG ;
//music.ctxData = stb_vorbis_open_filename(fileName, NULL, NULL);
music . ctxData = stb_vorbis_open_memory ( ( const unsigned char * ) data , dataSize , NULL , NULL ) ;
if ( music . ctxData ! = NULL )
{
stb_vorbis_info info = stb_vorbis_get_info ( ( stb_vorbis * ) music . ctxData ) ; // Get Ogg file info
// OGG bit rate defaults to 16 bit, it's enough for compressed format
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( info . sample_rate , 16 , info . channels ) ;
2021-02-25 12:55:08 +02:00
// WARNING: It seems this function returns length in frames, not samples, so we multiply by channels
music . sampleCount = ( unsigned int ) stb_vorbis_stream_length_in_samples ( ( stb_vorbis * ) music . ctxData ) * info . channels ;
music . looping = true ; // Looping enabled by default
musicLoaded = true ;
}
}
# endif
2021-02-22 20:45:52 +02:00
# if defined(SUPPORT_FILEFORMAT_XM)
2021-02-24 10:22:21 +02:00
else if ( TextIsEqual ( fileExtLower , " .xm " ) )
2021-02-22 20:45:52 +02:00
{
jar_xm_context_t * ctxXm = NULL ;
2021-03-21 14:07:55 -07:00
int result = jar_xm_create_context_safe ( & ctxXm , ( const char * ) data , dataSize , AUDIO . System . device . sampleRate ) ;
2021-02-22 20:45:52 +02:00
if ( result = = 0 ) // XM AUDIO.System.context created successfully
{
music . ctxType = MUSIC_MODULE_XM ;
jar_xm_set_max_loop_count ( ctxXm , 0 ) ; // Set infinite number of loops
2021-04-09 02:00:21 -07:00
unsigned int bits = 32 ;
if ( AUDIO_DEVICE_FORMAT = = ma_format_s16 )
bits = 16 ;
else if ( AUDIO_DEVICE_FORMAT = = ma_format_u8 )
bits = 8 ;
2021-02-22 20:45:52 +02:00
// NOTE: Only stereo is supported for XM
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( AUDIO . System . device . sampleRate , bits , 2 ) ;
2021-02-22 20:45:52 +02:00
music . sampleCount = ( unsigned int ) jar_xm_get_remaining_samples ( ctxXm ) * 2 ; // 2 channels
music . looping = true ; // Looping enabled by default
jar_xm_reset ( ctxXm ) ; // make sure we start at the beginning of the song
music . ctxData = ctxXm ;
musicLoaded = true ;
}
}
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
else if ( TextIsEqual ( fileExtLower , " .mod " ) )
{
2021-05-30 11:50:32 +02:00
jar_mod_context_t * ctxMod = ( jar_mod_context_t * ) RL_MALLOC ( sizeof ( jar_mod_context_t ) ) ;
2021-02-22 20:45:52 +02:00
int result = 0 ;
jar_mod_init ( ctxMod ) ;
2021-03-19 19:43:44 +01:00
2021-05-30 11:50:32 +02:00
// Copy data to allocated memory for default UnloadMusicStream
unsigned char *newData = (unsigned char *)RL_MALLOC(dataSize);
memcpy(newData, data, dataSize);
2021-03-19 19:43:44 +01:00
2021-02-22 20:45:52 +02:00
// Memory loaded version for jar_mod_load_file()
if ( dataSize & & dataSize < 32 * 1024 * 1024 )
{
ctxMod - > modfilesize = dataSize ;
ctxMod - > modfile = newData ;
2021-03-31 17:55:46 +02:00
if ( jar_mod_load ( ctxMod , ( void * ) ctxMod - > modfile , dataSize ) ) result = dataSize ;
2021-02-22 20:45:52 +02:00
}
2021-03-19 19:43:44 +01:00
2021-02-22 20:45:52 +02:00
if ( result > 0 )
{
music . ctxType = MUSIC_MODULE_MOD ;
// NOTE: Only stereo is supported for MOD
2021-06-03 23:36:47 +02:00
music . stream = LoadAudioStream ( AUDIO . System . device . sampleRate , 16 , 2 ) ;
2021-02-22 20:45:52 +02:00
music . sampleCount = ( unsigned int ) jar_mod_max_samples ( ctxMod ) * 2 ; // 2 channels
music . looping = true ; // Looping enabled by default
music.ctxData = ctxMod;
musicLoaded = true;
}
}
2021-03-19 19:43:44 +01:00
# endif
2021-02-22 20:45:52 +02:00
else TRACELOG ( LOG_WARNING , " STREAM: [%s] Fileformat not supported " , fileType ) ;
if ( ! musicLoaded )
{
if ( false ) { }
2021-02-24 10:22:21 +02:00
# if defined(SUPPORT_FILEFORMAT_WAV)
else if ( music . ctxType = = MUSIC_AUDIO_WAV ) drwav_uninit ( ( drwav * ) music . ctxData ) ;
# endif
# if defined(SUPPORT_FILEFORMAT_FLAC)
else if ( music . ctxType = = MUSIC_AUDIO_FLAC ) drflac_free ( ( drflac * ) music . ctxData , NULL ) ;
# endif
# if defined(SUPPORT_FILEFORMAT_MP3)
else if ( music . ctxType = = MUSIC_AUDIO_MP3 ) { drmp3_uninit ( ( drmp3 * ) music . ctxData ) ; RL_FREE ( music . ctxData ) ; }
2021-02-25 12:55:08 +02:00
# endif
# if defined(SUPPORT_FILEFORMAT_OGG)
else if ( music . ctxType = = MUSIC_AUDIO_OGG ) stb_vorbis_close ( ( stb_vorbis * ) music . ctxData ) ;
2021-02-24 10:22:21 +02:00
# endif
2021-03-22 20:41:33 +01:00
# if defined(SUPPORT_FILEFORMAT_XM)
2021-02-22 20:45:52 +02:00
else if ( music . ctxType = = MUSIC_MODULE_XM ) jar_xm_free_context ( ( jar_xm_context_t * ) music . ctxData ) ;
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
else if ( music . ctxType = = MUSIC_MODULE_MOD ) { jar_mod_unload ( ( jar_mod_context_t * ) music . ctxData ) ; RL_FREE ( music . ctxData ) ; }
# endif
music . ctxData = NULL ;
TRACELOG ( LOG_WARNING , " FILEIO: [%s] Music memory could not be opened " , fileType ) ;
}
else
{
// Show some music stream info
TRACELOG ( LOG_INFO , " FILEIO: [%s] Music memory successfully loaded: " , fileType ) ;
TRACELOG ( LOG_INFO , " > Total samples: %i " , music . sampleCount ) ;
TRACELOG ( LOG_INFO , " > Sample rate: %i Hz " , music . stream . sampleRate ) ;
TRACELOG ( LOG_INFO , " > Sample size: %i bits " , music . stream . sampleSize ) ;
TRACELOG ( LOG_INFO , " > Channels: %i (%s) " , music . stream . channels , ( music . stream . channels = = 1 ) ? " Mono " : ( music . stream . channels = = 2 ) ? " Stereo " : " Multi " ) ;
}
return music ;
}
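// Example usage (sketch, not part of the library): streaming music from a buffer already
// in memory, for instance data read with raylib's LoadFileData() (assumed available, not
// part of this module) or embedded in the executable. Note the extension includes the period.
//
//     unsigned int dataSize = 0;
//     unsigned char *data = LoadFileData("resources/target.ogg", &dataSize);
//     Music music = LoadMusicStreamFromMemory(".ogg", data, dataSize);
//     PlayMusicStream(music);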
2016-08-01 12:49:17 +02:00
// Unload music stream
void UnloadMusicStream ( Music music )
2016-07-29 21:35:57 +02:00
{
2021-06-03 23:36:47 +02:00
UnloadAudioStream ( music . stream ) ;
2016-08-16 11:09:55 +02:00
2021-02-14 16:37:34 +00:00
if ( music . ctxData ! = NULL )
{
if ( false ) { }
2020-05-23 23:19:59 +02:00
# if defined(SUPPORT_FILEFORMAT_WAV)
2021-02-14 16:37:34 +00:00
else if ( music . ctxType = = MUSIC_AUDIO_WAV ) drwav_uninit ( ( drwav * ) music . ctxData ) ;
2020-05-23 23:19:59 +02:00
# endif
2019-02-14 12:32:23 +02:00
# if defined(SUPPORT_FILEFORMAT_OGG)
2021-02-14 16:37:34 +00:00
else if ( music . ctxType = = MUSIC_AUDIO_OGG ) stb_vorbis_close ( ( stb_vorbis * ) music . ctxData ) ;
2019-02-14 12:32:23 +02:00
# endif
2017-03-26 22:49:01 +02:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
2021-02-14 16:37:34 +00:00
else if ( music . ctxType = = MUSIC_AUDIO_FLAC ) drflac_free ( ( drflac * ) music . ctxData , NULL ) ;
2017-03-26 22:49:01 +02:00
# endif
2018-05-17 00:04:58 +02:00
# if defined(SUPPORT_FILEFORMAT_MP3)
2019-07-26 10:26:39 +02:00
else if ( music . ctxType = = MUSIC_AUDIO_MP3 ) { drmp3_uninit ( ( drmp3 * ) music . ctxData ) ; RL_FREE ( music . ctxData ) ; }
2018-05-17 00:04:58 +02:00
# endif
2017-03-26 22:49:01 +02:00
# if defined(SUPPORT_FILEFORMAT_XM)
2021-02-14 16:37:34 +00:00
else if ( music . ctxType = = MUSIC_MODULE_XM ) jar_xm_free_context ( ( jar_xm_context_t * ) music . ctxData ) ;
2017-03-26 22:49:01 +02:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2021-02-14 16:37:34 +00:00
else if ( music . ctxType = = MUSIC_MODULE_MOD ) { jar_mod_unload ( ( jar_mod_context_t * ) music . ctxData ) ; RL_FREE ( music . ctxData ) ; }
2017-03-26 22:49:01 +02:00
# endif
2021-02-14 16:37:34 +00:00
}
2016-08-01 12:49:17 +02:00
}
2016-07-29 21:35:57 +02:00
2016-08-01 12:49:17 +02:00
// Start music playing (open stream)
void PlayMusicStream ( Music music )
{
2020-02-18 16:30:52 +01:00
if ( music . stream . buffer ! = NULL )
2019-07-26 10:26:39 +02:00
{
2019-09-03 23:08:02 +02:00
// For music streams, we need to make sure we maintain the frame cursor position
// This is a hack for this section of code in UpdateMusicStream()
2019-10-17 17:18:03 +02:00
// NOTE: In case window is minimized, music stream is stopped, just make sure to
2021-05-20 19:27:04 +02:00
// play again on window restore: if (IsMusicStreamPlaying(music)) PlayMusicStream(music);
2020-02-18 16:30:52 +01:00
ma_uint32 frameCursorPos = music . stream . buffer - > frameCursorPos ;
2019-09-03 23:08:02 +02:00
PlayAudioStream ( music . stream ) ; // WARNING: This resets the cursor position.
2020-02-18 16:30:52 +01:00
music . stream . buffer - > frameCursorPos = frameCursorPos ;
2019-07-26 10:26:39 +02:00
}
2016-07-29 21:35:57 +02:00
}
2016-08-01 12:49:17 +02:00
// Pause music playing
void PauseMusicStream ( Music music )
2014-04-19 16:36:49 +02:00
{
2019-07-26 10:26:39 +02:00
PauseAudioStream ( music . stream ) ;
2016-08-01 12:49:17 +02:00
}
2016-07-29 21:35:57 +02:00
2016-08-01 12:49:17 +02:00
// Resume music playing
void ResumeMusicStream ( Music music )
{
2019-07-26 10:26:39 +02:00
ResumeAudioStream ( music . stream ) ;
2016-08-01 12:49:17 +02:00
}
2016-07-29 21:35:57 +02:00
2016-08-01 12:49:17 +02:00
// Stop music playing (close stream)
void StopMusicStream ( Music music )
{
2019-07-26 10:26:39 +02:00
StopAudioStream ( music . stream ) ;
2018-11-06 15:10:50 +01:00
2019-07-26 10:26:39 +02:00
switch ( music . ctxType )
2016-09-15 11:53:16 +02:00
{
2020-05-23 23:19:59 +02:00
# if defined(SUPPORT_FILEFORMAT_WAV)
case MUSIC_AUDIO_WAV : drwav_seek_to_pcm_frame ( ( drwav * ) music . ctxData , 0 ) ; break ;
# endif
2019-02-14 12:32:23 +02:00
# if defined(SUPPORT_FILEFORMAT_OGG)
2019-07-26 10:26:39 +02:00
case MUSIC_AUDIO_OGG : stb_vorbis_seek_start ( ( stb_vorbis * ) music . ctxData ) ; break ;
2019-02-14 12:32:23 +02:00
# endif
2017-03-26 22:49:01 +02:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
2019-09-03 23:24:09 +02:00
case MUSIC_AUDIO_FLAC : drflac_seek_to_pcm_frame ( ( drflac * ) music . ctxData , 0 ) ; break ;
2017-03-26 22:49:01 +02:00
# endif
2018-09-19 15:57:46 +02:00
# if defined(SUPPORT_FILEFORMAT_MP3)
2019-07-26 10:26:39 +02:00
case MUSIC_AUDIO_MP3 : drmp3_seek_to_pcm_frame ( ( drmp3 * ) music . ctxData , 0 ) ; break ;
2018-09-19 15:57:46 +02:00
# endif
2017-03-26 22:49:01 +02:00
# if defined(SUPPORT_FILEFORMAT_XM)
2019-07-26 10:26:39 +02:00
case MUSIC_MODULE_XM : jar_xm_reset ( ( jar_xm_context_t * ) music . ctxData ) ; break ;
2017-03-26 22:49:01 +02:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2019-07-26 10:26:39 +02:00
case MUSIC_MODULE_MOD : jar_mod_seek_start ( ( jar_mod_context_t * ) music . ctxData ) ; break ;
2017-03-26 22:49:01 +02:00
# endif
2016-09-15 11:53:16 +02:00
default : break ;
}
2016-05-11 00:37:10 -07:00
}
2014-09-03 16:51:28 +02:00
2016-07-15 18:16:34 +02:00
// Update (re-fill) music buffers if data already processed
2016-08-01 12:49:17 +02:00
void UpdateMusicStream ( Music music )
2016-07-15 18:16:34 +02:00
{
2021-05-07 15:38:13 +02:00
if ( music . stream . buffer = = NULL ) return ;
2021-03-22 12:36:13 -07:00
2021-05-07 15:38:13 +02:00
# if defined(SUPPORT_FILEFORMAT_XM)
if ( music . ctxType = = MUSIC_MODULE_XM ) jar_xm_set_max_loop_count ( music . ctxData , music . looping ? 0 : 1 ) ;
# endif
2020-02-18 16:30:52 +01:00
2017-11-12 21:55:24 +10:00
bool streamEnding = false ;
2020-02-03 18:31:30 +01:00
unsigned int subBufferSizeInFrames = music . stream . buffer - > sizeInFrames / 2 ;
2017-11-24 21:54:00 +10:00
2017-11-12 21:55:24 +10:00
// NOTE: Using dynamic allocation because it could require more than 16KB
2019-07-26 10:26:39 +02:00
void * pcm = RL_CALLOC ( subBufferSizeInFrames * music . stream . channels * music . stream . sampleSize / 8 , 1 ) ;
2017-11-12 21:55:24 +10:00
2019-09-03 23:08:02 +02:00
int samplesCount = 0 ; // Total size of data streamed in L+R samples for xm floats, individual L or R for ogg shorts
2019-10-17 17:18:03 +02:00
2019-09-03 23:08:02 +02:00
// TODO: Get the sampleLeft using totalFramesProcessed... but first, get total frames processed correctly...
//ma_uint32 frameSizeInBytes = ma_get_bytes_per_sample(music.stream.buffer->dsp.formatConverterIn.config.formatIn)*music.stream.buffer->dsp.formatConverterIn.config.channels;
int sampleLeft = music . sampleCount - ( music . stream . buffer - > totalFramesProcessed * music . stream . channels ) ;
2017-11-12 21:55:24 +10:00
2021-03-22 12:36:13 -07:00
if ( music . ctxType = = MUSIC_MODULE_XM & & music . looping ) sampleLeft = subBufferSizeInFrames * 4 ;
2019-08-13 17:41:31 +02:00
while ( IsAudioStreamProcessed ( music . stream ) )
2017-11-12 21:55:24 +10:00
{
2019-09-03 23:08:02 +02:00
if ( ( sampleLeft / music . stream . channels ) > = subBufferSizeInFrames ) samplesCount = subBufferSizeInFrames * music . stream . channels ;
else samplesCount = sampleLeft ;
2017-11-12 21:55:24 +10:00
2019-07-26 10:26:39 +02:00
switch ( music . ctxType )
2017-11-12 21:55:24 +10:00
{
2020-05-23 23:19:59 +02:00
# if defined(SUPPORT_FILEFORMAT_WAV)
case MUSIC_AUDIO_WAV :
{
// NOTE: Returns the number of samples to process (not required)
2020-08-11 19:08:07 +02:00
if ( music . stream . sampleSize = = 16 ) drwav_read_pcm_frames_s16 ( ( drwav * ) music . ctxData , samplesCount / music . stream . channels , ( short * ) pcm ) ;
else if ( music . stream . sampleSize = = 32 ) drwav_read_pcm_frames_f32 ( ( drwav * ) music . ctxData , samplesCount / music . stream . channels , ( float * ) pcm ) ;
2020-05-23 23:19:59 +02:00
} break ;
# endif
2019-02-14 12:32:23 +02:00
# if defined(SUPPORT_FILEFORMAT_OGG)
2017-11-12 21:55:24 +10:00
case MUSIC_AUDIO_OGG :
{
// NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
2019-07-26 10:26:39 +02:00
stb_vorbis_get_samples_short_interleaved ( ( stb_vorbis * ) music . ctxData , music . stream . channels , ( short * ) pcm , samplesCount ) ;
2017-11-12 21:55:24 +10:00
} break ;
2019-02-14 12:32:23 +02:00
# endif
2017-11-12 21:55:24 +10:00
# if defined(SUPPORT_FILEFORMAT_FLAC)
case MUSIC_AUDIO_FLAC :
{
2019-07-23 22:21:01 +02:00
// NOTE: Returns the number of samples to process (not required)
2019-09-03 23:24:09 +02:00
drflac_read_pcm_frames_s16 ( ( drflac * ) music . ctxData , samplesCount , ( short * ) pcm ) ;
2017-11-12 21:55:24 +10:00
} break ;
# endif
2018-05-17 00:04:58 +02:00
# if defined(SUPPORT_FILEFORMAT_MP3)
2018-11-06 15:10:50 +01:00
case MUSIC_AUDIO_MP3 :
2018-05-17 00:04:58 +02:00
{
2018-10-31 17:04:24 +01:00
// NOTE: samplesCount, actually refers to framesCount and returns the number of frames processed
2019-07-26 10:26:39 +02:00
drmp3_read_pcm_frames_f32 ( ( drmp3 * ) music . ctxData , samplesCount / music . stream . channels , ( float * ) pcm ) ;
2018-05-17 00:04:58 +02:00
} break ;
# endif
2017-11-12 21:55:24 +10:00
# if defined(SUPPORT_FILEFORMAT_XM)
2018-11-06 15:10:50 +01:00
case MUSIC_MODULE_XM :
2018-10-31 17:04:24 +01:00
{
2021-04-09 02:00:21 -07:00
switch ( AUDIO_DEVICE_FORMAT )
{
case ma_format_f32 :
// NOTE: Internally this function considers 2 channels generation, so samplesCount/2
2021-06-03 20:25:28 +02:00
jar_xm_generate_samples ( ( jar_xm_context_t * ) music . ctxData , ( float * ) pcm , samplesCount / 2 ) ;
2021-04-09 02:00:21 -07:00
break ;
case ma_format_s16 :
// NOTE: Internally this function considers 2 channels generation, so samplesCount/2
2021-06-03 20:25:28 +02:00
jar_xm_generate_samples_16bit ( ( jar_xm_context_t * ) music . ctxData , ( short * ) pcm , samplesCount / 2 ) ;
2021-04-09 02:00:21 -07:00
break ;
case ma_format_u8 :
// NOTE: Internally this function considers 2 channels generation, so samplesCount/2
2021-06-03 20:25:28 +02:00
jar_xm_generate_samples_8bit ( ( jar_xm_context_t * ) music . ctxData , ( char * ) pcm , samplesCount / 2 ) ;
2021-04-09 02:00:21 -07:00
break ;
}
2018-10-31 17:04:24 +01:00
} break ;
2017-11-12 21:55:24 +10:00
# endif
# if defined(SUPPORT_FILEFORMAT_MOD)
2019-02-21 18:45:19 +01:00
case MUSIC_MODULE_MOD :
2018-12-25 15:18:35 +01:00
{
// NOTE: 3rd parameter (nbsample) specify the number of stereo 16bits samples you want, so sampleCount/2
2019-07-26 10:26:39 +02:00
jar_mod_fillbuffer ( ( jar_mod_context_t * ) music . ctxData , ( short * ) pcm , samplesCount / 2 , 0 ) ;
2018-12-25 15:18:35 +01:00
} break ;
2017-11-12 21:55:24 +10:00
# endif
default : break ;
}
2019-07-26 10:26:39 +02:00
UpdateAudioStream ( music . stream , pcm , samplesCount ) ;
2019-10-17 17:18:03 +02:00
2021-03-22 12:36:13 -07:00
if ( ( music . ctxType = = MUSIC_MODULE_XM ) | | music . ctxType = = MUSIC_MODULE_MOD )
2018-12-25 15:18:35 +01:00
{
2021-03-22 20:41:33 +01:00
if ( samplesCount > 1 ) sampleLeft - = samplesCount / 2 ;
else sampleLeft - = samplesCount ;
2018-12-25 15:18:35 +01:00
}
2019-09-03 23:08:02 +02:00
else sampleLeft - = samplesCount ;
2017-11-12 21:55:24 +10:00
2019-09-03 23:08:02 +02:00
if ( sampleLeft < = 0 )
2017-11-12 21:55:24 +10:00
{
streamEnding = true ;
break ;
}
}
// Free allocated pcm data
2019-04-23 14:55:35 +02:00
RL_FREE ( pcm ) ;
2017-11-12 21:55:24 +10:00
// Reset audio stream for looping
if ( streamEnding )
{
2020-05-14 14:00:37 +02:00
StopMusicStream ( music ) ; // Stop music (and reset)
if ( music . looping ) PlayMusicStream ( music ) ; // Play again
2017-11-12 21:55:24 +10:00
}
else
{
// NOTE: In case window is minimized, music stream is stopped,
// just make sure to play again on window restore
2021-05-20 19:27:04 +02:00
if ( IsMusicStreamPlaying ( music ) ) PlayMusicStream ( music ) ;
2017-11-12 21:55:24 +10:00
}
2014-04-19 16:36:49 +02:00
}
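// Example usage (sketch, not part of the library): UpdateMusicStream() must be called
// regularly (typically once per frame) so the stream double buffer keeps being refilled.
// Assumes raylib's WindowShouldClose() for the loop condition (not part of this module).
//
//     while (!WindowShouldClose())
//     {
//         UpdateMusicStream(music);   // Refill stream buffers with new data
//         // ... update and draw frame ...
//     }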
2016-05-11 18:14:59 -07:00
// Check if music stream is playing
2021-05-20 19:27:04 +02:00
bool IsMusicStreamPlaying ( Music music )
2014-04-09 20:25:26 +02:00
{
2019-07-26 10:26:39 +02:00
return IsAudioStreamPlaying ( music . stream ) ;
2014-04-09 20:25:26 +02:00
}
2014-04-19 16:36:49 +02:00
// Set volume for music
2016-08-01 12:49:17 +02:00
void SetMusicVolume ( Music music , float volume )
2014-01-23 12:36:18 +01:00
{
2019-07-26 10:26:39 +02:00
SetAudioStreamVolume ( music . stream , volume ) ;
2016-05-11 18:14:59 -07:00
}
2016-06-02 17:12:31 +02:00
// Set pitch for music
2016-08-01 12:49:17 +02:00
void SetMusicPitch ( Music music , float pitch )
2016-05-11 18:14:59 -07:00
{
2020-12-12 13:01:31 +01:00
SetAudioBufferPitch ( music . stream . buffer , pitch ) ;
2014-01-23 12:36:18 +01:00
}
2017-01-24 00:32:16 +01:00
2016-06-01 20:09:00 -07:00
// Get music time length (in seconds)
2016-08-01 12:49:17 +02:00
float GetMusicTimeLength ( Music music )
2014-01-23 12:36:18 +01:00
{
2019-01-02 14:05:20 +01:00
float totalSeconds = 0.0f ;
2019-02-21 18:45:19 +01:00
2019-07-26 10:26:39 +02:00
totalSeconds = ( float ) music . sampleCount / ( music . stream . sampleRate * music . stream . channels ) ;
2016-08-16 11:09:55 +02:00
2014-04-19 16:36:49 +02:00
return totalSeconds ;
}
// Get current music time played (in seconds)
2016-08-01 12:49:17 +02:00
float GetMusicTimePlayed ( Music music )
2014-04-19 16:36:49 +02:00
{
2021-03-22 12:36:13 -07:00
# if defined(SUPPORT_FILEFORMAT_XM)
2021-03-22 20:41:33 +01:00
if ( music . ctxType = = MUSIC_MODULE_XM )
{
2021-03-22 12:36:13 -07:00
uint64_t samples = 0 ;
jar_xm_get_position ( music . ctxData , NULL , NULL , NULL , & samples ) ;
samples = samples % ( music . sampleCount ) ;
2021-03-22 20:41:33 +01:00
return ( float ) ( samples ) / ( music . stream . sampleRate * music . stream . channels ) ;
}
2021-03-22 12:36:13 -07:00
# endif
2016-05-21 18:08:09 +02:00
float secondsPlayed = 0.0f ;
2020-02-18 16:30:52 +01:00
if ( music . stream . buffer ! = NULL )
{
//ma_uint32 frameSizeInBytes = ma_get_bytes_per_sample(music.stream.buffer->dsp.formatConverterIn.config.formatIn)*music.stream.buffer->dsp.formatConverterIn.config.channels;
unsigned int samplesPlayed = music . stream . buffer - > totalFramesProcessed * music . stream . channels ;
2020-11-19 20:11:11 +01:00
secondsPlayed = ( float ) samplesPlayed / ( music . stream . sampleRate * music . stream . channels ) ;
2020-02-18 16:30:52 +01:00
}
2016-08-01 12:49:17 +02:00
return secondsPlayed ;
}
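// Example usage (sketch, not part of the library): computing normalized playback progress,
// e.g. to draw a progress bar. Assumes 'music' is a valid stream currently playing.
//
//     float progress = GetMusicTimePlayed(music)/GetMusicTimeLength(music);   // [0.0f..1.0f]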
2021-06-03 23:36:47 +02:00
// Load audio stream (to stream audio pcm data)
AudioStream LoadAudioStream ( unsigned int sampleRate , unsigned int sampleSize , unsigned int channels )
2016-08-01 12:49:17 +02:00
{
AudioStream stream = { 0 } ;
2016-08-16 11:09:55 +02:00
2016-08-01 12:49:17 +02:00
stream . sampleRate = sampleRate ;
stream . sampleSize = sampleSize ;
2019-09-03 23:08:02 +02:00
stream . channels = channels ;
2016-08-01 12:49:17 +02:00
2019-03-12 11:54:45 +01:00
ma_format formatIn = ( ( stream . sampleSize = = 8 ) ? ma_format_u8 : ( ( stream . sampleSize = = 16 ) ? ma_format_s16 : ma_format_f32 ) ) ;
2017-11-12 20:59:16 +10:00
2019-07-23 22:21:01 +02:00
// The size of a streaming buffer must be at least double the size of a period
2020-02-12 14:45:02 +01:00
unsigned int periodSize = AUDIO . System . device . playback . internalPeriodSizeInFrames ;
2021-03-21 14:07:55 -07:00
unsigned int subBufferSize = GetAudioStreamBufferSizeDefault ( ) ;
2019-10-17 17:18:03 +02:00
2017-12-20 11:37:43 +01:00
if ( subBufferSize < periodSize ) subBufferSize = periodSize ;
2017-11-24 21:54:00 +10:00
2020-02-14 17:13:37 +01:00
// Create a double audio buffer of defined size
stream . buffer = LoadAudioBuffer ( formatIn , stream . channels , stream . sampleRate , subBufferSize * 2 , AUDIO_BUFFER_USAGE_STREAM ) ;
2019-10-17 17:18:03 +02:00
2019-09-03 23:08:02 +02:00
if ( stream . buffer ! = NULL )
2017-11-12 20:59:16 +10:00
{
2019-09-03 23:08:02 +02:00
stream . buffer - > looping = true ; // Always loop for streaming buffers
2020-03-27 17:16:30 +01:00
TRACELOG ( LOG_INFO , " STREAM: Initialized successfully (%i Hz, %i bit, %s) " , stream . sampleRate , stream . sampleSize , ( stream . channels = = 1 ) ? " Mono " : " Stereo " ) ;
2017-11-12 20:59:16 +10:00
}
2020-03-27 17:16:30 +01:00
else TRACELOG ( LOG_WARNING , " STREAM: Failed to load audio buffer, stream could not be created " ) ;
2016-08-01 12:49:17 +02:00
return stream ;
2014-04-19 16:36:49 +02:00
}
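// Example usage (sketch, not part of the library): creating a raw stream to push
// procedurally generated audio. The parameters (22050 Hz, 16 bit, mono) are illustrative.
//
//     AudioStream stream = LoadAudioStream(22050, 16, 1);
//     PlayAudioStream(stream);
//     // ... feed data with UpdateAudioStream() whenever IsAudioStreamProcessed() ...
//     UnloadAudioStream(stream);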
2021-06-03 23:36:47 +02:00
// Unload audio stream and free memory
void UnloadAudioStream ( AudioStream stream )
2016-08-01 12:49:17 +02:00
{
2020-02-14 17:13:37 +01:00
UnloadAudioBuffer ( stream . buffer ) ;
2018-11-06 15:10:50 +01:00
2020-03-27 17:16:30 +01:00
TRACELOG ( LOG_INFO , " STREAM: Unloaded audio stream data from RAM " ) ;
2016-08-01 12:49:17 +02:00
}
2016-08-02 17:32:24 +02:00
// Update audio stream buffers with data
2017-05-10 19:34:57 +02:00
// NOTE 1: Only updates one buffer of the stream source: unqueue -> update -> queue
2019-08-13 17:41:31 +02:00
// NOTE 2: To unqueue a buffer it needs to be processed: IsAudioStreamProcessed()
2017-02-09 22:19:48 +01:00
void UpdateAudioStream ( AudioStream stream , const void * data , int samplesCount )
2016-08-02 17:32:24 +02:00
{
2020-02-18 16:30:52 +01:00
if ( stream . buffer ! = NULL )
2017-11-12 20:59:16 +10:00
{
2020-02-18 16:30:52 +01:00
if ( stream . buffer - > isSubBufferProcessed [ 0 ] | | stream . buffer - > isSubBufferProcessed [ 1 ] )
2019-09-03 23:08:02 +02:00
{
ma_uint32 subBufferToUpdate = 0 ;
2017-11-12 20:59:16 +10:00
2020-02-18 16:30:52 +01:00
if ( stream . buffer - > isSubBufferProcessed [ 0 ] & & stream . buffer - > isSubBufferProcessed [ 1 ] )
2019-09-03 23:08:02 +02:00
{
2019-10-17 17:18:03 +02:00
// Both buffers are available for updating.
2019-09-03 23:08:02 +02:00
// Update the first one and make sure the cursor is moved back to the front.
subBufferToUpdate = 0 ;
2020-02-18 16:30:52 +01:00
stream . buffer - > frameCursorPos = 0 ;
2019-09-03 23:08:02 +02:00
}
else
{
// Just update whichever sub-buffer is processed.
2020-02-18 16:30:52 +01:00
subBufferToUpdate = ( stream . buffer - > isSubBufferProcessed [ 0 ] ) ? 0 : 1 ;
2019-09-03 23:08:02 +02:00
}
2018-11-06 15:10:50 +01:00
2020-02-18 16:30:52 +01:00
ma_uint32 subBufferSizeInFrames = stream . buffer - > sizeInFrames / 2 ;
unsigned char * subBuffer = stream . buffer - > data + ( ( subBufferSizeInFrames * stream . channels * ( stream . sampleSize / 8 ) ) * subBufferToUpdate ) ;
2017-11-12 20:59:16 +10:00
2019-09-03 23:08:02 +02:00
// TODO: Get total frames processed on this buffer... DOES NOT WORK.
2020-02-18 16:30:52 +01:00
stream . buffer - > totalFramesProcessed + = subBufferSizeInFrames ;
2017-11-12 20:59:16 +10:00
2019-10-17 17:18:03 +02:00
// Does this API expect a whole buffer to be updated in one go?
2019-09-03 23:08:02 +02:00
// Assuming so, but if not will need to change this logic.
if ( subBufferSizeInFrames > = ( ma_uint32 ) samplesCount / stream . channels )
{
ma_uint32 framesToWrite = subBufferSizeInFrames ;
2018-11-06 15:10:50 +01:00
2019-09-03 23:08:02 +02:00
if ( framesToWrite > ( ( ma_uint32 ) samplesCount / stream . channels ) ) framesToWrite = ( ma_uint32 ) samplesCount / stream . channels ;
2017-11-12 20:59:16 +10:00
2019-09-03 23:08:02 +02:00
ma_uint32 bytesToWrite = framesToWrite * stream . channels * ( stream . sampleSize / 8 ) ;
memcpy ( subBuffer , data , bytesToWrite ) ;
2017-11-12 20:59:16 +10:00
2019-09-03 23:08:02 +02:00
// Any leftover frames should be filled with zeros.
ma_uint32 leftoverFrameCount = subBufferSizeInFrames - framesToWrite ;
2018-11-06 15:10:50 +01:00
2019-09-03 23:08:02 +02:00
if ( leftoverFrameCount > 0 ) memset ( subBuffer + bytesToWrite , 0 , leftoverFrameCount * stream . channels * ( stream . sampleSize / 8 ) ) ;
2017-11-12 20:59:16 +10:00
2020-02-18 16:30:52 +01:00
stream . buffer - > isSubBufferProcessed [ subBufferToUpdate ] = false ;
2019-09-03 23:08:02 +02:00
}
2020-03-27 17:16:30 +01:00
else TRACELOG ( LOG_WARNING , " STREAM: Attempting to write too many frames to buffer " ) ;
2017-11-12 20:59:16 +10:00
}
2020-03-27 17:16:30 +01:00
else TRACELOG ( LOG_WARNING , " STREAM: Buffer not available for updating " ) ;
2017-11-12 20:59:16 +10:00
}
2016-08-02 17:32:24 +02:00
}
// Check if any audio stream buffer requires refill
2019-08-13 17:41:31 +02:00
bool IsAudioStreamProcessed ( AudioStream stream )
2016-08-02 17:32:24 +02:00
{
2020-02-14 11:44:50 +01:00
if ( stream . buffer = = NULL ) return false ;
2017-11-12 20:59:16 +10:00
2019-07-23 22:21:01 +02:00
return ( stream . buffer - > isSubBufferProcessed [ 0 ] | | stream . buffer - > isSubBufferProcessed [ 1 ] ) ;
2016-08-01 12:49:17 +02:00
}
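// Example usage (sketch, not part of the library): typical per-frame refill pattern for a
// raw stream. Assumes 'buffer' holds at least 'samplesCount' samples already filled by the
// application in the stream sample format.
//
//     if (IsAudioStreamProcessed(stream))
//     {
//         UpdateAudioStream(stream, buffer, samplesCount);   // unqueue -> update -> queue
//     }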
2014-04-19 16:36:49 +02:00
2016-08-02 17:32:24 +02:00
// Play audio stream
void PlayAudioStream ( AudioStream stream )
2014-04-19 16:36:49 +02:00
{
2019-07-23 22:21:01 +02:00
PlayAudioBuffer ( stream . buffer ) ;
2016-08-02 17:32:24 +02:00
}
2016-07-29 21:35:57 +02:00
2016-08-02 17:32:24 +02:00
// Pause audio stream
void PauseAudioStream ( AudioStream stream )
{
2019-07-23 22:21:01 +02:00
PauseAudioBuffer ( stream . buffer ) ;
2016-08-02 17:32:24 +02:00
}
2016-07-29 21:35:57 +02:00
2016-08-02 17:32:24 +02:00
// Resume audio stream playing
void ResumeAudioStream ( AudioStream stream )
{
2019-07-23 22:21:01 +02:00
ResumeAudioBuffer ( stream . buffer ) ;
2014-04-19 16:36:49 +02:00
}
2017-11-12 21:55:24 +10:00
// Check if audio stream is playing.
bool IsAudioStreamPlaying ( AudioStream stream )
{
2019-07-23 22:21:01 +02:00
return IsAudioBufferPlaying ( stream . buffer ) ;
2017-11-12 21:55:24 +10:00
}
2016-08-02 17:32:24 +02:00
// Stop audio stream
void StopAudioStream ( AudioStream stream )
{
2019-07-23 22:21:01 +02:00
StopAudioBuffer ( stream . buffer ) ;
2016-08-02 17:32:24 +02:00
}
2020-02-14 17:13:37 +01:00
// Set volume for audio stream (1.0 is max level)
2017-11-14 21:15:50 +10:00
void SetAudioStreamVolume ( AudioStream stream , float volume )
{
2019-07-23 22:21:01 +02:00
SetAudioBufferVolume ( stream . buffer , volume ) ;
2017-11-14 21:15:50 +10:00
}
2020-02-14 17:13:37 +01:00
// Set pitch for audio stream (1.0 is base level)
2017-11-14 21:15:50 +10:00
void SetAudioStreamPitch ( AudioStream stream , float pitch )
{
2019-07-23 22:21:01 +02:00
SetAudioBufferPitch ( stream . buffer , pitch ) ;
2017-11-14 21:15:50 +10:00
}
2020-02-14 17:13:37 +01:00
// Set default size for new audio streams
void SetAudioStreamBufferSizeDefault ( int size )
{
AUDIO . Buffer . defaultSize = size ;
}
2021-03-21 14:07:55 -07:00
// Get default size for new audio streams
int GetAudioStreamBufferSizeDefault(void)
{
    // If the buffer size is not set, compute one good enough for a decent frame rate
    if (AUDIO.Buffer.defaultSize == 0) AUDIO.Buffer.defaultSize = AUDIO.System.device.sampleRate/30;

    return AUDIO.Buffer.defaultSize;
2021-03-21 14:07:55 -07:00
}
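// Example usage (sketch, not part of the library): requesting a bigger default buffer
// before creating streams, trading latency for fewer refills. The values are illustrative.
//
//     SetAudioStreamBufferSizeDefault(4096);                // Frames per sub-buffer
//     AudioStream stream = LoadAudioStream(44100, 16, 2);   // Uses the new default size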
2021-03-22 20:41:33 +01:00
2016-08-02 17:32:24 +02:00
//----------------------------------------------------------------------------------
// Module specific Functions Definition
//----------------------------------------------------------------------------------
2020-02-03 18:31:30 +01:00
// Log callback function
static void OnLog ( ma_context * pContext , ma_device * pDevice , ma_uint32 logLevel , const char * message )
{
( void ) pContext ;
( void ) pDevice ;
2021-02-05 19:19:44 +01:00
TRACELOG ( LOG_WARNING , " miniaudio: %s " , message ) ; // All log messages from miniaudio are errors
2020-02-03 18:31:30 +01:00
}
2020-02-04 22:43:31 +10:00
// Reads audio data from an AudioBuffer object in internal format.
static ma_uint32 ReadAudioBufferFramesInInternalFormat ( AudioBuffer * audioBuffer , void * framesOut , ma_uint32 frameCount )
{
ma_uint32 subBufferSizeInFrames = ( audioBuffer - > sizeInFrames > 1 ) ? audioBuffer - > sizeInFrames / 2 : audioBuffer - > sizeInFrames ;
ma_uint32 currentSubBufferIndex = audioBuffer - > frameCursorPos / subBufferSizeInFrames ;
2020-02-26 20:23:36 +01:00
if ( currentSubBufferIndex > 1 ) return 0 ;
2020-02-04 22:43:31 +10:00
// Another thread can update the processed state of buffers so
// we just take a copy here to try and avoid potential synchronization problems
bool isSubBufferProcessed [ 2 ] ;
isSubBufferProcessed [ 0 ] = audioBuffer - > isSubBufferProcessed [ 0 ] ;
isSubBufferProcessed [ 1 ] = audioBuffer - > isSubBufferProcessed [ 1 ] ;
ma_uint32 frameSizeInBytes = ma_get_bytes_per_frame ( audioBuffer - > converter . config . formatIn , audioBuffer - > converter . config . channelsIn ) ;
// Fill out every frame until we find a buffer that's marked as processed. Then fill the remainder with 0
ma_uint32 framesRead = 0 ;
while ( 1 )
{
// We break from this loop differently depending on the buffer's usage
// - For static buffers, we simply fill as much data as we can
// - For streaming buffers we only fill the halves of the buffer that are processed
// Unprocessed halves must keep their audio data intact
if ( audioBuffer - > usage = = AUDIO_BUFFER_USAGE_STATIC )
{
if ( framesRead > = frameCount ) break ;
}
else
{
if ( isSubBufferProcessed [ currentSubBufferIndex ] ) break ;
}
ma_uint32 totalFramesRemaining = ( frameCount - framesRead ) ;
if ( totalFramesRemaining = = 0 ) break ;
ma_uint32 framesRemainingInOutputBuffer ;
if ( audioBuffer - > usage = = AUDIO_BUFFER_USAGE_STATIC )
{
framesRemainingInOutputBuffer = audioBuffer - > sizeInFrames - audioBuffer - > frameCursorPos ;
}
else
{
ma_uint32 firstFrameIndexOfThisSubBuffer = subBufferSizeInFrames * currentSubBufferIndex ;
framesRemainingInOutputBuffer = subBufferSizeInFrames - ( audioBuffer - > frameCursorPos - firstFrameIndexOfThisSubBuffer ) ;
}
ma_uint32 framesToRead = totalFramesRemaining ;
if ( framesToRead > framesRemainingInOutputBuffer ) framesToRead = framesRemainingInOutputBuffer ;
memcpy ( ( unsigned char * ) framesOut + ( framesRead * frameSizeInBytes ) , audioBuffer - > data + ( audioBuffer - > frameCursorPos * frameSizeInBytes ) , framesToRead * frameSizeInBytes ) ;
audioBuffer - > frameCursorPos = ( audioBuffer - > frameCursorPos + framesToRead ) % audioBuffer - > sizeInFrames ;
framesRead + = framesToRead ;
// If we've read to the end of the buffer, mark it as processed
if ( framesToRead = = framesRemainingInOutputBuffer )
{
audioBuffer - > isSubBufferProcessed [ currentSubBufferIndex ] = true ;
isSubBufferProcessed [ currentSubBufferIndex ] = true ;
currentSubBufferIndex = ( currentSubBufferIndex + 1 ) % 2 ;
// We need to break from this loop if we're not looping
if ( ! audioBuffer - > looping )
{
StopAudioBuffer ( audioBuffer ) ;
break ;
}
}
}
// Zero-fill excess
ma_uint32 totalFramesRemaining = ( frameCount - framesRead ) ;
if ( totalFramesRemaining > 0 )
{
memset ( ( unsigned char * ) framesOut + ( framesRead * frameSizeInBytes ) , 0 , totalFramesRemaining * frameSizeInBytes ) ;
// For static buffers we can fill the remaining frames with silence for safety, but we don't want
// to report those frames as "read". The reason for this is that the caller uses the return value
// to know whether or not a non-looping sound has finished playback.
if ( audioBuffer - > usage ! = AUDIO_BUFFER_USAGE_STATIC ) framesRead + = totalFramesRemaining ;
}
return framesRead ;
}
// Reads audio data from an AudioBuffer object in device format. Returned data will be in a format appropriate for mixing.
static ma_uint32 ReadAudioBufferFramesInMixingFormat ( AudioBuffer * audioBuffer , float * framesOut , ma_uint32 frameCount )
{
2020-02-26 20:23:36 +01:00
// What's going on here is that we're continuously converting data from the AudioBuffer's internal format to the mixing format, which
2020-02-04 22:43:31 +10:00
// should be defined by the output format of the data converter. We do this until frameCount frames have been output. The important
// detail to remember here is that we never, ever attempt to read more input data than is required for the specified number of output
// frames. This can be achieved with ma_data_converter_get_required_input_frame_count().
ma_uint8 inputBuffer [ 4096 ] ;
2020-05-23 19:23:40 +02:00
ma_uint32 inputBufferFrameCap = sizeof ( inputBuffer ) / ma_get_bytes_per_frame ( audioBuffer - > converter . config . formatIn , audioBuffer - > converter . config . channelsIn ) ;
2020-02-04 22:43:31 +10:00
ma_uint32 totalOutputFramesProcessed = 0 ;
while ( totalOutputFramesProcessed < frameCount )
{
ma_uint64 outputFramesToProcessThisIteration = frameCount - totalOutputFramesProcessed ;
ma_uint64 inputFramesToProcessThisIteration = ma_data_converter_get_required_input_frame_count ( & audioBuffer - > converter , outputFramesToProcessThisIteration ) ;
if ( inputFramesToProcessThisIteration > inputBufferFrameCap )
{
inputFramesToProcessThisIteration = inputBufferFrameCap ;
}
2021-03-22 20:41:33 +01:00
float * runningFramesOut = framesOut + ( totalOutputFramesProcessed * audioBuffer - > converter . config . channelsOut ) ;
2020-02-04 22:43:31 +10:00
/* At this point we can convert the data to our mixing format. */
ma_uint64 inputFramesProcessedThisIteration = ReadAudioBufferFramesInInternalFormat ( audioBuffer , inputBuffer , ( ma_uint32 ) inputFramesToProcessThisIteration ) ; /* Safe cast. */
ma_uint64 outputFramesProcessedThisIteration = outputFramesToProcessThisIteration ;
ma_data_converter_process_pcm_frames ( & audioBuffer - > converter , inputBuffer , & inputFramesProcessedThisIteration , runningFramesOut , & outputFramesProcessedThisIteration ) ;
2020-02-26 20:23:36 +01:00
2020-02-04 22:43:31 +10:00
totalOutputFramesProcessed + = ( ma_uint32 ) outputFramesProcessedThisIteration ; /* Safe cast. */
if ( inputFramesProcessedThisIteration < inputFramesToProcessThisIteration )
{
break ; /* Ran out of input data. */
}
/* This should never be hit, but will add it here for safety. Ensures we get out of the loop when no input nor output frames are processed. */
if ( inputFramesProcessedThisIteration = = 0 & & outputFramesProcessedThisIteration = = 0 )
{
break ;
}
}
return totalOutputFramesProcessed ;
}
2020-02-03 18:31:30 +01:00
// Sending audio data to device callback function
2021-05-31 19:32:48 +02:00
// This function will be called when miniaudio needs more data
2020-02-03 18:31:30 +01:00
// NOTE: All the mixing takes place here
static void OnSendAudioDataToDevice ( ma_device * pDevice , void * pFramesOut , const void * pFramesInput , ma_uint32 frameCount )
{
( void ) pDevice ;
// Mixing is basically just an accumulation, we need to initialize the output buffer to 0
memset ( pFramesOut , 0 , frameCount * pDevice - > playback . channels * ma_get_bytes_per_sample ( pDevice - > playback . format ) ) ;
// Using a mutex here for thread-safety which makes things not real-time
// Avoiding the lock is unlikely to be necessary for this project, but consider how it could be removed if real-time behaviour is required
ma_mutex_lock ( & AUDIO . System . lock ) ;
{
for ( AudioBuffer * audioBuffer = AUDIO . Buffer . first ; audioBuffer ! = NULL ; audioBuffer = audioBuffer - > next )
{
// Ignore stopped or paused sounds
if ( ! audioBuffer - > playing | | audioBuffer - > paused ) continue ;
ma_uint32 framesRead = 0 ;
while ( 1 )
{
2020-02-26 20:23:36 +01:00
if ( framesRead > = frameCount ) break ;
2020-02-03 18:31:30 +01:00
// Just read as much data as we can from the stream
ma_uint32 framesToRead = ( frameCount - framesRead ) ;
while ( framesToRead > 0 )
{
float tempBuffer [ 1024 ] ; // 512 frames for stereo
ma_uint32 framesToReadRightNow = framesToRead ;
2020-02-14 17:13:37 +01:00
if ( framesToReadRightNow > sizeof ( tempBuffer ) / sizeof ( tempBuffer [ 0 ] ) / AUDIO_DEVICE_CHANNELS )
2020-02-03 18:31:30 +01:00
{
2020-02-14 17:13:37 +01:00
framesToReadRightNow = sizeof ( tempBuffer ) / sizeof ( tempBuffer [ 0 ] ) / AUDIO_DEVICE_CHANNELS ;
2020-02-03 18:31:30 +01:00
}
2020-02-04 22:43:31 +10:00
ma_uint32 framesJustRead = ReadAudioBufferFramesInMixingFormat ( audioBuffer , tempBuffer , framesToReadRightNow ) ;
2020-02-03 18:31:30 +01:00
if ( framesJustRead > 0 )
{
float * framesOut = ( float * ) pFramesOut + ( framesRead * AUDIO . System . device . playback . channels ) ;
float * framesIn = tempBuffer ;
MixAudioFrames ( framesOut , framesIn , framesJustRead , audioBuffer - > volume ) ;
framesToRead - = framesJustRead ;
framesRead + = framesJustRead ;
}
2020-02-03 19:26:28 +01:00
2020-02-03 18:31:30 +01:00
if ( ! audioBuffer - > playing )
{
framesRead = frameCount ;
break ;
}
// If we weren't able to read all the frames we requested, break
if ( framesJustRead < framesToReadRightNow )
{
if ( ! audioBuffer - > looping )
{
StopAudioBuffer ( audioBuffer ) ;
break ;
}
else
{
// Should never get here, but just for safety,
// move the cursor position back to the start and continue the loop
audioBuffer - > frameCursorPos = 0 ;
continue ;
}
}
}
// If for some reason we weren't able to read every frame we'll need to break from the loop
// Not doing this could theoretically put us into an infinite loop
if ( framesToRead > 0 ) break ;
}
}
}
ma_mutex_unlock ( & AUDIO . System . lock ) ;
}
// This is the main mixing function. Mixing is pretty simple in this project - it's just an accumulation.
// NOTE: framesOut is both an input and an output. It will be initially filled with zeros outside of this function.
static void MixAudioFrames ( float * framesOut , const float * framesIn , ma_uint32 frameCount , float localVolume )
{
for ( ma_uint32 iFrame = 0 ; iFrame < frameCount ; + + iFrame )
{
for ( ma_uint32 iChannel = 0 ; iChannel < AUDIO . System . device . playback . channels ; + + iChannel )
{
float * frameOut = framesOut + ( iFrame * AUDIO . System . device . playback . channels ) ;
const float * frameIn = framesIn + ( iFrame * AUDIO . System . device . playback . channels ) ;
2020-02-04 22:43:31 +10:00
frameOut [ iChannel ] + = ( frameIn [ iChannel ] * localVolume ) ;
2020-02-03 18:31:30 +01:00
}
}
}
2017-03-26 22:49:01 +02:00
# if defined(SUPPORT_FILEFORMAT_WAV)
2020-09-13 15:38:57 +02:00
// Load WAV file data into Wave structure
// NOTE: Using dr_wav library
2020-09-14 19:20:38 +02:00
static Wave LoadWAV ( const unsigned char * fileData , unsigned int fileSize )
2013-11-18 23:38:44 +01:00
{
2016-01-23 13:22:13 +01:00
Wave wave = { 0 } ;
2020-05-24 00:20:32 +02:00
drwav wav = { 0 } ;
2020-11-03 23:47:33 +01:00
2020-05-24 00:20:32 +02:00
bool success = drwav_init_memory ( & wav , fileData , fileSize , NULL ) ;
2020-11-03 23:47:33 +01:00
2020-05-24 00:20:32 +02:00
if ( success )
{
2020-11-29 23:14:11 -08:00
wave . sampleCount = ( unsigned int ) wav . totalPCMFrameCount * wav . channels ;
2020-05-24 00:20:32 +02:00
wave . sampleRate = wav . sampleRate ;
wave . sampleSize = 16 ; // NOTE: We are forcing conversion to 16bit
wave . channels = wav . channels ;
wave . data = ( short * ) RL_MALLOC ( wave . sampleCount * sizeof ( short ) ) ;
drwav_read_pcm_frames_s16 ( & wav , wav . totalPCMFrameCount , wave . data ) ;
}
2020-09-13 15:38:57 +02:00
else TRACELOG ( LOG_WARNING , " WAVE: Failed to load WAV data " ) ;
2020-11-03 23:47:33 +01:00
2020-05-24 00:20:32 +02:00
drwav_uninit ( & wav ) ;
2020-07-31 12:13:04 +02:00
2013-11-23 13:30:54 +01:00
return wave ;
2013-12-01 12:34:31 +01:00
}
2018-10-29 16:18:06 +01:00
// Save wave data as WAV file
2020-09-13 15:38:57 +02:00
// NOTE: Using dr_wav library
2018-10-29 16:18:06 +01:00
static int SaveWAV ( Wave wave , const char * fileName )
{
2020-11-22 00:10:16 +01:00
int success = false ;
2020-12-23 15:03:26 +01:00
2020-05-23 23:19:59 +02:00
drwav wav = { 0 } ;
drwav_data_format format = { 0 } ;
2020-09-15 13:17:10 +02:00
format . container = drwav_container_riff ;
format . format = DR_WAVE_FORMAT_PCM ;
2020-05-23 23:19:59 +02:00
format . channels = wave . channels ;
format . sampleRate = wave . sampleRate ;
format . bitsPerSample = wave . sampleSize ;
2020-11-03 23:47:33 +01:00
2021-01-15 00:20:23 +01:00
void * fileData = NULL ;
2020-11-29 23:14:11 -08:00
size_t fileDataSize = 0 ;
2020-11-22 00:10:16 +01:00
success = drwav_init_memory_write ( & wav , & fileData , & fileDataSize , & format , NULL ) ;
2020-11-29 23:14:11 -08:00
if ( success ) success = ( int ) drwav_write_pcm_frames ( & wav , wave . sampleCount / wave . channels , wave . data ) ;
2020-11-22 00:10:16 +01:00
drwav_result result = drwav_uninit ( & wav ) ;
2020-12-23 15:03:26 +01:00
2021-01-15 00:20:23 +01:00
if ( result = = DRWAV_SUCCESS ) success = SaveFileData ( fileName , ( unsigned char * ) fileData , ( unsigned int ) fileDataSize ) ;
2020-12-23 15:03:26 +01:00
2020-09-15 13:17:10 +02:00
drwav_free ( fileData , NULL ) ;
2020-11-03 23:47:33 +01:00
2020-11-22 00:10:16 +01:00
return success ;
2018-10-29 16:18:06 +01:00
}

#endif

#if defined(SUPPORT_FILEFORMAT_OGG)
// Load OGG file data into Wave structure
// NOTE: Using stb_vorbis library
static Wave LoadOGG(const unsigned char *fileData, unsigned int fileSize)
{
    Wave wave = { 0 };

    stb_vorbis *oggData = stb_vorbis_open_memory((unsigned char *)fileData, fileSize, NULL, NULL);

    if (oggData != NULL)
    {
        stb_vorbis_info info = stb_vorbis_get_info(oggData);

        wave.sampleRate = info.sample_rate;
        wave.sampleSize = 16;   // 16 bit per sample (short)
        wave.channels = info.channels;
        wave.sampleCount = (unsigned int)stb_vorbis_stream_length_in_samples(oggData)*info.channels;  // Independent by channel

        float totalSeconds = stb_vorbis_stream_length_in_seconds(oggData);
        if (totalSeconds > 10) TRACELOG(LOG_WARNING, "WAVE: OGG audio length larger than 10 seconds (%f sec.), that's a big file in memory, consider music streaming", totalSeconds);

        wave.data = (short *)RL_MALLOC(wave.sampleCount*sizeof(short));

        // NOTE: Returns the number of samples to process (be careful! we ask for number of shorts!)
        stb_vorbis_get_samples_short_interleaved(oggData, info.channels, (short *)wave.data, wave.sampleCount);
        TRACELOG(LOG_INFO, "WAVE: OGG data loaded successfully (%i Hz, %i bit, %s)", wave.sampleRate, wave.sampleSize, (wave.channels == 1)? "Mono" : "Stereo");

        stb_vorbis_close(oggData);
    }
    else TRACELOG(LOG_WARNING, "WAVE: Failed to load OGG data");

    return wave;
}
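
// Note (sketch): wave.sampleCount counts individual samples across all channels, not frames.
// For example, one second of 44100 Hz stereo OGG decodes to:
//
//     frames  = 44100
//     samples = 44100*2 = 88200               // wave.sampleCount
//     bytes   = 88200*sizeof(short) = 176400  // size of the 16 bit output buffer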

#endif

#if defined(SUPPORT_FILEFORMAT_FLAC)
// Load FLAC file data into Wave structure
// NOTE: Using dr_flac library
static Wave LoadFLAC(const unsigned char *fileData, unsigned int fileSize)
{
    Wave wave = { 0 };

    // Decode the entire FLAC file in one go
    unsigned long long int totalFrameCount = 0;
    wave.data = drflac_open_memory_and_read_pcm_frames_s16(fileData, fileSize, &wave.channels, &wave.sampleRate, &totalFrameCount, NULL);

    if (wave.data != NULL)
    {
        wave.sampleCount = (unsigned int)totalFrameCount*wave.channels;
        wave.sampleSize = 16;

        TRACELOG(LOG_INFO, "WAVE: FLAC data loaded successfully (%i Hz, %i bit, %s)", wave.sampleRate, wave.sampleSize, (wave.channels == 1)? "Mono" : "Stereo");
    }
    else TRACELOG(LOG_WARNING, "WAVE: Failed to load FLAC data");

    return wave;
}

#endif

#if defined(SUPPORT_FILEFORMAT_MP3)
// Load MP3 file data into Wave structure
// NOTE: Using dr_mp3 library
static Wave LoadMP3(const unsigned char *fileData, unsigned int fileSize)
{
    Wave wave = { 0 };
    drmp3_config config = { 0 };

    // Decode the entire MP3 file in one go
    unsigned long long int totalFrameCount = 0;
    wave.data = drmp3_open_memory_and_read_pcm_frames_f32(fileData, fileSize, &config, &totalFrameCount, NULL);

    if (wave.data != NULL)
    {
        wave.channels = config.channels;
        wave.sampleRate = config.sampleRate;
        wave.sampleCount = (unsigned int)totalFrameCount*wave.channels;
        wave.sampleSize = 32;

        // NOTE: Only support up to 2 channels (mono, stereo)
        // TODO: Really?
        if (wave.channels > 2) TRACELOG(LOG_WARNING, "WAVE: MP3 channels number (%i) not supported", wave.channels);

        TRACELOG(LOG_INFO, "WAVE: MP3 file loaded successfully (%i Hz, %i bit, %s)", wave.sampleRate, wave.sampleSize, (wave.channels == 1)? "Mono" : "Stereo");
    }
    else TRACELOG(LOG_WARNING, "WAVE: Failed to load MP3 data");

    return wave;
}
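
// Note (sketch): unlike the WAV/OGG/FLAC loaders above, dr_mp3 is asked for 32 bit float
// samples here (wave.sampleSize = 32). If 16 bit PCM is needed afterwards, the samples can be
// converted with this module's WaveFormat(); hypothetical file name used for illustration:
//
//     Wave mp3 = LoadWave("resources/music.mp3");
//     WaveFormat(&mp3, mp3.sampleRate, 16, mp3.channels);   // f32 -> s16 conversion
//     // ...
//     UnloadWave(mp3);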

#endif

// Some required functions for audio standalone module version
#if defined(RAUDIO_STANDALONE)
// Check file extension
static bool IsFileExtension(const char *fileName, const char *ext)
{
    bool result = false;
    const char *fileExt;

    if ((fileExt = strrchr(fileName, '.')) != NULL)
    {
        if (strcmp(fileExt, ext) == 0) result = true;
    }

    return result;
}

// Get pointer to extension for a filename string (includes the dot: .png)
static const char *GetFileExtension(const char *fileName)
{
    const char *dot = strrchr(fileName, '.');

    if (!dot || dot == fileName) return NULL;

    return dot;
}

// Check if two text strings are equal
// REQUIRES: strcmp()
static bool TextIsEqual(const char *text1, const char *text2)
{
    bool result = false;

    if (strcmp(text1, text2) == 0) result = true;

    return result;
}

// Get lower case version of provided string
// REQUIRES: tolower()
static const char *TextToLower(const char *text)
{
    #define MAX_TEXT_BUFFER_LENGTH   1024

    static char buffer[MAX_TEXT_BUFFER_LENGTH] = { 0 };

    for (int i = 0; i < MAX_TEXT_BUFFER_LENGTH; i++)
    {
        if (text[i] != '\0')
        {
            buffer[i] = (char)tolower(text[i]);
            //if ((text[i] >= 'A') && (text[i] <= 'Z')) buffer[i] = text[i] + 32;
        }
        else { buffer[i] = '\0'; break; }
    }

    return buffer;
}
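
// Caveat (illustrative): TextToLower() returns a pointer to a single static buffer, so a second
// call overwrites the result of the first one. Copy the string if both values are needed:
//
//     char first[MAX_TEXT_BUFFER_LENGTH] = { 0 };
//     strcpy(first, TextToLower("FILE.WAV"));          // keep a private copy
//     const char *second = TextToLower("FILE.OGG");    // static buffer now holds "file.ogg"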

// Load data from file into a buffer
static unsigned char *LoadFileData(const char *fileName, unsigned int *bytesRead)
{
    unsigned char *data = NULL;
    *bytesRead = 0;

    if (fileName != NULL)
    {
        FILE *file = fopen(fileName, "rb");

        if (file != NULL)
        {
            // WARNING: On binary streams SEEK_END may not be supported,
            // so fseek() + ftell() may fail to report the size in some (rare) cases
            fseek(file, 0, SEEK_END);
            int size = ftell(file);
            fseek(file, 0, SEEK_SET);

            if (size > 0)
            {
                data = (unsigned char *)RL_MALLOC(size*sizeof(unsigned char));

                // NOTE: fread() returns number of read elements instead of bytes, so we read [1 byte, size elements]
                unsigned int count = (unsigned int)fread(data, sizeof(unsigned char), size, file);
                *bytesRead = count;

                if (count != (unsigned int)size) TRACELOG(LOG_WARNING, "FILEIO: [%s] File partially loaded", fileName);
                else TRACELOG(LOG_INFO, "FILEIO: [%s] File loaded successfully", fileName);
            }
            else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to read file", fileName);

            fclose(file);
        }
        else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to open file", fileName);
    }
    else TRACELOG(LOG_WARNING, "FILEIO: File name provided is not valid");

    return data;
}
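
// Usage sketch (illustrative only): this standalone replacement mirrors raylib's LoadFileData();
// the returned buffer is owned by the caller and must be released with RL_FREE():
//
//     unsigned int size = 0;
//     unsigned char *data = LoadFileData("resources/sound.flac", &size);   // hypothetical path
//     if (data != NULL)
//     {
//         Wave flac = LoadFLAC(data, size);   // requires SUPPORT_FILEFORMAT_FLAC
//         RL_FREE(data);
//     }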

// Save data to file from buffer
static bool SaveFileData(const char *fileName, void *data, unsigned int bytesToWrite)
{
    bool success = false;

    if (fileName != NULL)
    {
        FILE *file = fopen(fileName, "wb");

        if (file != NULL)
        {
            unsigned int count = (unsigned int)fwrite(data, sizeof(unsigned char), bytesToWrite, file);

            if (count == 0) TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to write file", fileName);
            else if (count != bytesToWrite) TRACELOG(LOG_WARNING, "FILEIO: [%s] File partially written", fileName);
            else
            {
                TRACELOG(LOG_INFO, "FILEIO: [%s] File saved successfully", fileName);
                success = true;
            }

            fclose(file);
        }
        else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to open file", fileName);
    }
    else TRACELOG(LOG_WARNING, "FILEIO: File name provided is not valid");

    return success;
}

// Save text data to file (write), string must be '\0' terminated
static bool SaveFileText(const char *fileName, char *text)
{
    bool success = false;

    if (fileName != NULL)
    {
        FILE *file = fopen(fileName, "wt");

        if (file != NULL)
        {
            int count = fprintf(file, "%s", text);

            if (count == 0) TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to write text file", fileName);
            else
            {
                TRACELOG(LOG_INFO, "FILEIO: [%s] Text file saved successfully", fileName);
                success = true;
            }

            fclose(file);
        }
        else TRACELOG(LOG_WARNING, "FILEIO: [%s] Failed to open text file", fileName);
    }
    else TRACELOG(LOG_WARNING, "FILEIO: File name provided is not valid");

    return success;
}

#endif

#undef AudioBuffer