Merge pull request #93831 from what-is-a-git/wav-runtime

Add runtime file loading to `AudioStreamWAV`
Thaddeus Crews 2024-12-03 14:40:42 -06:00
commit 42eb4fbc07
GPG Key ID: 62181B86FE9E5D84
6 changed files with 611 additions and 567 deletions

View File

@@ -11,6 +11,38 @@
<link title="Runtime file loading and saving">$DOCS_URL/tutorials/io/runtime_file_loading_and_saving.html</link>
</tutorials>
<methods>
<method name="load_from_buffer" qualifiers="static">
<return type="AudioStreamWAV" />
<param index="0" name="buffer" type="PackedByteArray" />
<param index="1" name="options" type="Dictionary" default="{}" />
<description>
Creates a new [AudioStreamWAV] instance from the given buffer. The keys and values of [param options] match the properties of [ResourceImporterWAV].
The usage of [param options] is identical to [method AudioStreamWAV.load_from_file].
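[b]Example (illustrative sketch):[/b] The snippet below assumes [code]wav_bytes[/code] already holds the raw contents of a WAV file (for instance read with [method FileAccess.get_file_as_bytes] or received over the network) and that an [AudioStreamPlayer] child node exists:
[codeblock]
var stream = AudioStreamWAV.load_from_buffer(wav_bytes, {
    "force/mono": true
})
$AudioStreamPlayer.stream = stream
$AudioStreamPlayer.play()
[/codeblock]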
</description>
</method>
<method name="load_from_file" qualifiers="static">
<return type="AudioStreamWAV" />
<param index="0" name="path" type="String" />
<param index="1" name="options" type="Dictionary" default="{}" />
<description>
Creates a new [AudioStreamWAV] instance from the given file path. The keys and values of [param options] match the properties of [ResourceImporterWAV].
[b]Example:[/b] Load the first file dropped as a WAV and play it:
[codeblock]
@onready var audio_player = $AudioStreamPlayer

func _ready():
    get_window().files_dropped.connect(_on_files_dropped)

func _on_files_dropped(files):
    if files[0].get_extension() == "wav":
        audio_player.stream = AudioStreamWAV.load_from_file(files[0], {
            "force/max_rate": true,
            "force/max_rate_hz": 11025
        })
        audio_player.play()
[/codeblock]
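If the file cannot be read or does not contain valid WAV data, an empty reference ([code]null[/code] in GDScript) is returned, so it is worth checking the result before using it. A minimal sketch with a hypothetical path:
[codeblock]
var stream = AudioStreamWAV.load_from_file("user://recording.wav")
if stream == null:
    push_error("Could not load WAV file.")
[/codeblock]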
</description>
</method>
<method name="save_to_wav">
<return type="int" enum="Error" />
<param index="0" name="path" type="String" />

View File

@@ -33,10 +33,6 @@
#include "core/io/file_access.h"
#include "core/io/marshalls.h"
#include "core/io/resource_saver.h"
#include "scene/resources/audio_stream_wav.h"
const float TRIM_DB_LIMIT = -50;
const int TRIM_FADE_OUT_FRAMES = 500;
String ResourceImporterWAV::get_importer_name() const {
return "wav";
@@ -95,469 +91,13 @@ void ResourceImporterWAV::get_import_options(const String &p_path, List<ImportOp
}
Error ResourceImporterWAV::import(ResourceUID::ID p_source_id, const String &p_source_file, const String &p_save_path, const HashMap<StringName, Variant> &p_options, List<String> *r_platform_variants, List<String> *r_gen_files, Variant *r_metadata) {
/* STEP 1, READ WAVE FILE */
Error err;
Ref<FileAccess> file = FileAccess::open(p_source_file, FileAccess::READ, &err);
ERR_FAIL_COND_V_MSG(err != OK, ERR_CANT_OPEN, "Cannot open file '" + p_source_file + "'.");
/* CHECK RIFF */
char riff[5];
riff[4] = 0;
file->get_buffer((uint8_t *)&riff, 4); //RIFF
if (riff[0] != 'R' || riff[1] != 'I' || riff[2] != 'F' || riff[3] != 'F') {
ERR_FAIL_V_MSG(ERR_FILE_UNRECOGNIZED, vformat("Not a WAV file. File should start with 'RIFF', but found '%s', in file of size %d bytes", riff, file->get_length()));
Dictionary options;
for (const KeyValue<StringName, Variant> &pair : p_options) {
options[pair.key] = pair.value;
}
/* GET FILESIZE */
// The file size in header is 8 bytes less than the actual size.
// See https://docs.fileformat.com/audio/wav/
const int FILE_SIZE_HEADER_OFFSET = 8;
uint32_t file_size_header = file->get_32() + FILE_SIZE_HEADER_OFFSET;
uint64_t file_size = file->get_length();
if (file_size != file_size_header) {
WARN_PRINT(vformat("File size %d is %s than the expected size %d. (%s)", file_size, file_size > file_size_header ? "larger" : "smaller", file_size_header, p_source_file));
}
/* CHECK WAVE */
char wave[5];
wave[4] = 0;
file->get_buffer((uint8_t *)&wave, 4); //WAVE
if (wave[0] != 'W' || wave[1] != 'A' || wave[2] != 'V' || wave[3] != 'E') {
ERR_FAIL_V_MSG(ERR_FILE_UNRECOGNIZED, vformat("Not a WAV file. Header should contain 'WAVE', but found '%s', in file of size %d bytes", wave, file->get_length()));
}
// Let users override potential loop points from the WAV.
// We parse the WAV loop points only with "Detect From WAV" (0).
int import_loop_mode = p_options["edit/loop_mode"];
int format_bits = 0;
int format_channels = 0;
AudioStreamWAV::LoopMode loop_mode = AudioStreamWAV::LOOP_DISABLED;
uint16_t compression_code = 1;
bool format_found = false;
bool data_found = false;
int format_freq = 0;
int loop_begin = 0;
int loop_end = 0;
int frames = 0;
Vector<float> data;
while (!file->eof_reached()) {
/* chunk */
char chunkID[4];
file->get_buffer((uint8_t *)&chunkID, 4); //RIFF
/* chunk size */
uint32_t chunksize = file->get_32();
uint32_t file_pos = file->get_position(); //save file pos, so we can skip to next chunk safely
if (file->eof_reached()) {
//ERR_PRINT("EOF REACH");
break;
}
if (chunkID[0] == 'f' && chunkID[1] == 'm' && chunkID[2] == 't' && chunkID[3] == ' ' && !format_found) {
/* IS FORMAT CHUNK */
//Issue: #7755 : Not a bug - usage of other formats (format codes) are unsupported in current importer version.
//Consider revision for engine version 3.0
compression_code = file->get_16();
if (compression_code != 1 && compression_code != 3) {
ERR_FAIL_V_MSG(ERR_INVALID_DATA, "Format not supported for WAVE file (not PCM). Save WAVE files as uncompressed PCM or IEEE float instead.");
}
format_channels = file->get_16();
if (format_channels != 1 && format_channels != 2) {
ERR_FAIL_V_MSG(ERR_INVALID_DATA, "Format not supported for WAVE file (not stereo or mono).");
}
format_freq = file->get_32(); //sampling rate
file->get_32(); // average bits/second (unused)
file->get_16(); // block align (unused)
format_bits = file->get_16(); // bits per sample
if (format_bits % 8 || format_bits == 0) {
ERR_FAIL_V_MSG(ERR_INVALID_DATA, "Invalid amount of bits in the sample (should be one of 8, 16, 24 or 32).");
}
if (compression_code == 3 && format_bits % 32) {
ERR_FAIL_V_MSG(ERR_INVALID_DATA, "Invalid amount of bits in the IEEE float sample (should be 32 or 64).");
}
/* Don't need anything else, continue */
format_found = true;
}
if (chunkID[0] == 'd' && chunkID[1] == 'a' && chunkID[2] == 't' && chunkID[3] == 'a' && !data_found) {
/* IS DATA CHUNK */
data_found = true;
if (!format_found) {
ERR_PRINT("'data' chunk before 'format' chunk found.");
break;
}
uint64_t remaining_bytes = file_size - file_pos;
frames = chunksize;
if (remaining_bytes < chunksize) {
WARN_PRINT(vformat("Data chunk size is smaller than expected. Proceeding with actual data size. (%s)", p_source_file));
frames = remaining_bytes;
}
ERR_FAIL_COND_V(format_channels == 0, ERR_INVALID_DATA);
frames /= format_channels;
frames /= (format_bits >> 3);
/*print_line("chunksize: "+itos(chunksize));
print_line("channels: "+itos(format_channels));
print_line("bits: "+itos(format_bits));
*/
data.resize(frames * format_channels);
if (compression_code == 1) {
if (format_bits == 8) {
for (int i = 0; i < frames * format_channels; i++) {
// 8 bit samples are UNSIGNED
data.write[i] = int8_t(file->get_8() - 128) / 128.f;
}
} else if (format_bits == 16) {
for (int i = 0; i < frames * format_channels; i++) {
//16 bit SIGNED
data.write[i] = int16_t(file->get_16()) / 32768.f;
}
} else {
for (int i = 0; i < frames * format_channels; i++) {
//16+ bits samples are SIGNED
// if sample is > 16 bits, just read extra bytes
uint32_t s = 0;
for (int b = 0; b < (format_bits >> 3); b++) {
s |= ((uint32_t)file->get_8()) << (b * 8);
}
s <<= (32 - format_bits);
data.write[i] = (int32_t(s) >> 16) / 32768.f;
}
}
} else if (compression_code == 3) {
if (format_bits == 32) {
for (int i = 0; i < frames * format_channels; i++) {
//32 bit IEEE Float
data.write[i] = file->get_float();
}
} else if (format_bits == 64) {
for (int i = 0; i < frames * format_channels; i++) {
//64 bit IEEE Float
data.write[i] = file->get_double();
}
}
}
if (file->eof_reached()) {
ERR_FAIL_V_MSG(ERR_FILE_CORRUPT, "Premature end of file.");
}
}
if (import_loop_mode == 0 && chunkID[0] == 's' && chunkID[1] == 'm' && chunkID[2] == 'p' && chunkID[3] == 'l') {
// Loop point info!
/**
* Consider exploring next document:
* http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/RIFFNEW.pdf
* Especially on page:
* 16 - 17
* Timestamp:
* 22:38 06.07.2017 GMT
**/
for (int i = 0; i < 10; i++) {
file->get_32(); // i wish to know why should i do this... no doc!
}
// only read 0x00 (loop forward), 0x01 (loop ping-pong) and 0x02 (loop backward)
// Skip anything else because it's not supported, reserved for future uses or sampler specific
// from https://sites.google.com/site/musicgapi/technical-documents/wav-file-format#smpl (loop type values table)
int loop_type = file->get_32();
if (loop_type == 0x00 || loop_type == 0x01 || loop_type == 0x02) {
if (loop_type == 0x00) {
loop_mode = AudioStreamWAV::LOOP_FORWARD;
} else if (loop_type == 0x01) {
loop_mode = AudioStreamWAV::LOOP_PINGPONG;
} else if (loop_type == 0x02) {
loop_mode = AudioStreamWAV::LOOP_BACKWARD;
}
loop_begin = file->get_32();
loop_end = file->get_32();
}
}
// Move to the start of the next chunk. Note that RIFF requires a padding byte for odd
// chunk sizes.
file->seek(file_pos + chunksize + (chunksize & 1));
}
// STEP 2, APPLY CONVERSIONS
bool is16 = format_bits != 8;
int rate = format_freq;
/*
print_line("Input Sample: ");
print_line("\tframes: " + itos(frames));
print_line("\tformat_channels: " + itos(format_channels));
print_line("\t16bits: " + itos(is16));
print_line("\trate: " + itos(rate));
print_line("\tloop: " + itos(loop));
print_line("\tloop begin: " + itos(loop_begin));
print_line("\tloop end: " + itos(loop_end));
*/
//apply frequency limit
bool limit_rate = p_options["force/max_rate"];
int limit_rate_hz = p_options["force/max_rate_hz"];
if (limit_rate && rate > limit_rate_hz && rate > 0 && frames > 0) {
// resample!
int new_data_frames = (int)(frames * (float)limit_rate_hz / (float)rate);
Vector<float> new_data;
new_data.resize(new_data_frames * format_channels);
for (int c = 0; c < format_channels; c++) {
float frac = .0f;
int ipos = 0;
for (int i = 0; i < new_data_frames; i++) {
// Cubic interpolation should be enough.
float y0 = data[MAX(0, ipos - 1) * format_channels + c];
float y1 = data[ipos * format_channels + c];
float y2 = data[MIN(frames - 1, ipos + 1) * format_channels + c];
float y3 = data[MIN(frames - 1, ipos + 2) * format_channels + c];
new_data.write[i * format_channels + c] = Math::cubic_interpolate(y1, y2, y0, y3, frac);
// update position and always keep fractional part within ]0...1]
// in order to avoid 32bit floating point precision errors
frac += (float)rate / (float)limit_rate_hz;
int tpos = (int)Math::floor(frac);
ipos += tpos;
frac -= tpos;
}
}
if (loop_mode) {
loop_begin = (int)(loop_begin * (float)new_data_frames / (float)frames);
loop_end = (int)(loop_end * (float)new_data_frames / (float)frames);
}
data = new_data;
rate = limit_rate_hz;
frames = new_data_frames;
}
bool normalize = p_options["edit/normalize"];
if (normalize) {
float max = 0;
for (int i = 0; i < data.size(); i++) {
float amp = Math::abs(data[i]);
if (amp > max) {
max = amp;
}
}
if (max > 0) {
float mult = 1.0 / max;
for (int i = 0; i < data.size(); i++) {
data.write[i] *= mult;
}
}
}
bool trim = p_options["edit/trim"];
if (trim && (loop_mode == AudioStreamWAV::LOOP_DISABLED) && format_channels > 0) {
int first = 0;
int last = (frames / format_channels) - 1;
bool found = false;
float limit = Math::db_to_linear(TRIM_DB_LIMIT);
for (int i = 0; i < data.size() / format_channels; i++) {
float ampChannelSum = 0;
for (int j = 0; j < format_channels; j++) {
ampChannelSum += Math::abs(data[(i * format_channels) + j]);
}
float amp = Math::abs(ampChannelSum / (float)format_channels);
if (!found && amp > limit) {
first = i;
found = true;
}
if (found && amp > limit) {
last = i;
}
}
if (first < last) {
Vector<float> new_data;
new_data.resize((last - first) * format_channels);
for (int i = first; i < last; i++) {
float fadeOutMult = 1;
if (last - i < TRIM_FADE_OUT_FRAMES) {
fadeOutMult = ((float)(last - i - 1) / (float)TRIM_FADE_OUT_FRAMES);
}
for (int j = 0; j < format_channels; j++) {
new_data.write[((i - first) * format_channels) + j] = data[(i * format_channels) + j] * fadeOutMult;
}
}
data = new_data;
frames = data.size() / format_channels;
}
}
if (import_loop_mode >= 2) {
loop_mode = (AudioStreamWAV::LoopMode)(import_loop_mode - 1);
loop_begin = p_options["edit/loop_begin"];
loop_end = p_options["edit/loop_end"];
// Wrap around to max frames, so `-1` can be used to select the end, etc.
if (loop_begin < 0) {
loop_begin = CLAMP(loop_begin + frames, 0, frames - 1);
}
if (loop_end < 0) {
loop_end = CLAMP(loop_end + frames, 0, frames - 1);
}
}
int compression = p_options["compress/mode"];
bool force_mono = p_options["force/mono"];
if (force_mono && format_channels == 2) {
Vector<float> new_data;
new_data.resize(data.size() / 2);
for (int i = 0; i < frames; i++) {
new_data.write[i] = (data[i * 2 + 0] + data[i * 2 + 1]) / 2.0;
}
data = new_data;
format_channels = 1;
}
bool force_8_bit = p_options["force/8_bit"];
if (force_8_bit) {
is16 = false;
}
Vector<uint8_t> pcm_data;
AudioStreamWAV::Format dst_format;
if (compression == 1) {
dst_format = AudioStreamWAV::FORMAT_IMA_ADPCM;
if (format_channels == 1) {
_compress_ima_adpcm(data, pcm_data);
} else {
//byte interleave
Vector<float> left;
Vector<float> right;
int tframes = data.size() / 2;
left.resize(tframes);
right.resize(tframes);
for (int i = 0; i < tframes; i++) {
left.write[i] = data[i * 2 + 0];
right.write[i] = data[i * 2 + 1];
}
Vector<uint8_t> bleft;
Vector<uint8_t> bright;
_compress_ima_adpcm(left, bleft);
_compress_ima_adpcm(right, bright);
int dl = bleft.size();
pcm_data.resize(dl * 2);
uint8_t *w = pcm_data.ptrw();
const uint8_t *rl = bleft.ptr();
const uint8_t *rr = bright.ptr();
for (int i = 0; i < dl; i++) {
w[i * 2 + 0] = rl[i];
w[i * 2 + 1] = rr[i];
}
}
} else {
dst_format = is16 ? AudioStreamWAV::FORMAT_16_BITS : AudioStreamWAV::FORMAT_8_BITS;
bool enforce16 = is16 || compression == 2;
pcm_data.resize(data.size() * (enforce16 ? 2 : 1));
{
uint8_t *w = pcm_data.ptrw();
int ds = data.size();
for (int i = 0; i < ds; i++) {
if (enforce16) {
int16_t v = CLAMP(data[i] * 32768, -32768, 32767);
encode_uint16(v, &w[i * 2]);
} else {
int8_t v = CLAMP(data[i] * 128, -128, 127);
w[i] = v;
}
}
}
}
Vector<uint8_t> dst_data;
if (compression == 2) {
dst_format = AudioStreamWAV::FORMAT_QOA;
qoa_desc desc = {};
uint32_t qoa_len = 0;
desc.samplerate = rate;
desc.samples = frames;
desc.channels = format_channels;
void *encoded = qoa_encode((short *)pcm_data.ptr(), &desc, &qoa_len);
if (encoded) {
dst_data.resize(qoa_len);
memcpy(dst_data.ptrw(), encoded, qoa_len);
QOA_FREE(encoded);
}
} else {
dst_data = pcm_data;
}
Ref<AudioStreamWAV> sample;
sample.instantiate();
sample->set_data(dst_data);
sample->set_format(dst_format);
sample->set_mix_rate(rate);
sample->set_loop_mode(loop_mode);
sample->set_loop_begin(loop_begin);
sample->set_loop_end(loop_end);
sample->set_stereo(format_channels == 2);
Ref<AudioStreamWAV> sample = AudioStreamWAV::load_from_file(p_source_file, options);
ResourceSaver::save(sample, p_save_path + ".sample");
return OK;
}

View File

@@ -32,6 +32,7 @@
#define RESOURCE_IMPORTER_WAV_H
#include "core/io/resource_importer.h"
#include "scene/resources/audio_stream_wav.h"
class ResourceImporterWAV : public ResourceImporter {
GDCLASS(ResourceImporterWAV, ResourceImporter);
@@ -49,97 +50,6 @@ public:
virtual void get_import_options(const String &p_path, List<ImportOption> *r_options, int p_preset = 0) const override;
virtual bool get_option_visibility(const String &p_path, const String &p_option, const HashMap<StringName, Variant> &p_options) const override;
static void _compress_ima_adpcm(const Vector<float> &p_data, Vector<uint8_t> &dst_data) {
static const int16_t _ima_adpcm_step_table[89] = {
7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
static const int8_t _ima_adpcm_index_table[16] = {
-1, -1, -1, -1, 2, 4, 6, 8,
-1, -1, -1, -1, 2, 4, 6, 8
};
int datalen = p_data.size();
int datamax = datalen;
if (datalen & 1) {
datalen++;
}
dst_data.resize(datalen / 2 + 4);
uint8_t *w = dst_data.ptrw();
int i, step_idx = 0, prev = 0;
uint8_t *out = w;
const float *in = p_data.ptr();
// Initial value is zero.
*(out++) = 0;
*(out++) = 0;
// Table index initial value.
*(out++) = 0;
// Unused.
*(out++) = 0;
for (i = 0; i < datalen; i++) {
int step, diff, vpdiff, mask;
uint8_t nibble;
int16_t xm_sample;
if (i >= datamax) {
xm_sample = 0;
} else {
xm_sample = CLAMP(in[i] * 32767.0, -32768, 32767);
}
diff = (int)xm_sample - prev;
nibble = 0;
step = _ima_adpcm_step_table[step_idx];
vpdiff = step >> 3;
if (diff < 0) {
nibble = 8;
diff = -diff;
}
mask = 4;
while (mask) {
if (diff >= step) {
nibble |= mask;
diff -= step;
vpdiff += step;
}
step >>= 1;
mask >>= 1;
}
if (nibble & 8) {
prev -= vpdiff;
} else {
prev += vpdiff;
}
prev = CLAMP(prev, -32768, 32767);
step_idx += _ima_adpcm_index_table[nibble];
step_idx = CLAMP(step_idx, 0, 88);
if (i & 1) {
*out |= nibble << 4;
out++;
} else {
*out = nibble;
}
}
}
virtual Error import(ResourceUID::ID p_source_id, const String &p_source_file, const String &p_save_path, const HashMap<StringName, Variant> &p_options, List<String> *r_platform_variants, List<String> *r_gen_files = nullptr, Variant *r_metadata = nullptr) override;
virtual bool can_import_threaded() const override { return true; }

View File

@@ -30,9 +30,12 @@
#include "audio_stream_wav.h"
#include "core/io/file_access.h"
#include "core/io/file_access_memory.h"
#include "core/io/marshalls.h"
const float TRIM_DB_LIMIT = -50;
const int TRIM_FADE_OUT_FRAMES = 500;
void AudioStreamPlaybackWAV::start(double p_from_pos) {
if (base->format == AudioStreamWAV::FORMAT_IMA_ADPCM) {
//no seeking in IMA_ADPCM
@@ -721,6 +724,9 @@ Ref<AudioSample> AudioStreamWAV::generate_sample() const {
}
void AudioStreamWAV::_bind_methods() {
ClassDB::bind_static_method("AudioStreamWAV", D_METHOD("load_from_file", "path", "options"), &AudioStreamWAV::load_from_file, DEFVAL(Dictionary()));
ClassDB::bind_static_method("AudioStreamWAV", D_METHOD("load_from_buffer", "buffer", "options"), &AudioStreamWAV::load_from_buffer, DEFVAL(Dictionary()));
ClassDB::bind_method(D_METHOD("set_data", "data"), &AudioStreamWAV::set_data);
ClassDB::bind_method(D_METHOD("get_data"), &AudioStreamWAV::get_data);
@@ -763,6 +769,477 @@ void AudioStreamWAV::_bind_methods() {
BIND_ENUM_CONSTANT(LOOP_BACKWARD);
}
Ref<AudioStreamWAV> AudioStreamWAV::load_from_buffer(const Vector<uint8_t> &p_file_data, const Dictionary &p_options) {
// /* STEP 1, READ WAVE FILE */
Ref<FileAccessMemory> file;
file.instantiate();
Error err = file->open_custom(p_file_data.ptr(), p_file_data.size());
ERR_FAIL_COND_V_MSG(err != OK, Ref<AudioStreamWAV>(), "Cannot create memfile for WAV file buffer.");
/* CHECK RIFF */
char riff[5];
riff[4] = 0;
file->get_buffer((uint8_t *)&riff, 4); //RIFF
if (riff[0] != 'R' || riff[1] != 'I' || riff[2] != 'F' || riff[3] != 'F') {
ERR_FAIL_V_MSG(Ref<AudioStreamWAV>(), vformat("Not a WAV file. File should start with 'RIFF', but found '%s', in file of size %d bytes", riff, file->get_length()));
}
/* GET FILESIZE */
// The file size in header is 8 bytes less than the actual size.
// See https://docs.fileformat.com/audio/wav/
const int FILE_SIZE_HEADER_OFFSET = 8;
uint32_t file_size_header = file->get_32() + FILE_SIZE_HEADER_OFFSET;
uint64_t file_size = file->get_length();
if (file_size != file_size_header) {
WARN_PRINT(vformat("File size %d is %s than the expected size %d.", file_size, file_size > file_size_header ? "larger" : "smaller", file_size_header));
}
/* CHECK WAVE */
char wave[5];
wave[4] = 0;
file->get_buffer((uint8_t *)&wave, 4); //WAVE
if (wave[0] != 'W' || wave[1] != 'A' || wave[2] != 'V' || wave[3] != 'E') {
ERR_FAIL_V_MSG(Ref<AudioStreamWAV>(), vformat("Not a WAV file. Header should contain 'WAVE', but found '%s', in file of size %d bytes", wave, file->get_length()));
}
// Let users override potential loop points from the WAV.
// We parse the WAV loop points only with "Detect From WAV" (0).
int import_loop_mode = p_options["edit/loop_mode"];
int format_bits = 0;
int format_channels = 0;
AudioStreamWAV::LoopMode loop_mode = AudioStreamWAV::LOOP_DISABLED;
uint16_t compression_code = 1;
bool format_found = false;
bool data_found = false;
int format_freq = 0;
int loop_begin = 0;
int loop_end = 0;
int frames = 0;
Vector<float> data;
while (!file->eof_reached()) {
/* chunk */
char chunk_id[4];
file->get_buffer((uint8_t *)&chunk_id, 4); //RIFF
/* chunk size */
uint32_t chunksize = file->get_32();
uint32_t file_pos = file->get_position(); //save file pos, so we can skip to next chunk safely
if (file->eof_reached()) {
//ERR_PRINT("EOF REACH");
break;
}
if (chunk_id[0] == 'f' && chunk_id[1] == 'm' && chunk_id[2] == 't' && chunk_id[3] == ' ' && !format_found) {
/* IS FORMAT CHUNK */
//Issue: #7755 : Not a bug - usage of other formats (format codes) are unsupported in current importer version.
//Consider revision for engine version 3.0
compression_code = file->get_16();
if (compression_code != 1 && compression_code != 3) {
ERR_FAIL_V_MSG(Ref<AudioStreamWAV>(), "Format not supported for WAVE file (not PCM). Save WAVE files as uncompressed PCM or IEEE float instead.");
}
format_channels = file->get_16();
if (format_channels != 1 && format_channels != 2) {
ERR_FAIL_V_MSG(Ref<AudioStreamWAV>(), "Format not supported for WAVE file (not stereo or mono).");
}
format_freq = file->get_32(); //sampling rate
file->get_32(); // average bits/second (unused)
file->get_16(); // block align (unused)
format_bits = file->get_16(); // bits per sample
if (format_bits % 8 || format_bits == 0) {
ERR_FAIL_V_MSG(Ref<AudioStreamWAV>(), "Invalid amount of bits in the sample (should be one of 8, 16, 24 or 32).");
}
if (compression_code == 3 && format_bits % 32) {
ERR_FAIL_V_MSG(Ref<AudioStreamWAV>(), "Invalid amount of bits in the IEEE float sample (should be 32 or 64).");
}
/* Don't need anything else, continue */
format_found = true;
}
if (chunk_id[0] == 'd' && chunk_id[1] == 'a' && chunk_id[2] == 't' && chunk_id[3] == 'a' && !data_found) {
/* IS DATA CHUNK */
data_found = true;
if (!format_found) {
ERR_PRINT("'data' chunk before 'format' chunk found.");
break;
}
uint64_t remaining_bytes = file_size - file_pos;
frames = chunksize;
if (remaining_bytes < chunksize) {
WARN_PRINT("Data chunk size is smaller than expected. Proceeding with actual data size.");
frames = remaining_bytes;
}
ERR_FAIL_COND_V(format_channels == 0, Ref<AudioStreamWAV>());
frames /= format_channels;
frames /= (format_bits >> 3);
/*print_line("chunksize: "+itos(chunksize));
print_line("channels: "+itos(format_channels));
print_line("bits: "+itos(format_bits));
*/
data.resize(frames * format_channels);
if (compression_code == 1) {
if (format_bits == 8) {
for (int i = 0; i < frames * format_channels; i++) {
// 8 bit samples are UNSIGNED
data.write[i] = int8_t(file->get_8() - 128) / 128.f;
}
} else if (format_bits == 16) {
for (int i = 0; i < frames * format_channels; i++) {
//16 bit SIGNED
data.write[i] = int16_t(file->get_16()) / 32768.f;
}
} else {
for (int i = 0; i < frames * format_channels; i++) {
//16+ bits samples are SIGNED
// if sample is > 16 bits, just read extra bytes
uint32_t s = 0;
for (int b = 0; b < (format_bits >> 3); b++) {
s |= ((uint32_t)file->get_8()) << (b * 8);
}
s <<= (32 - format_bits);
data.write[i] = (int32_t(s) >> 16) / 32768.f;
}
}
} else if (compression_code == 3) {
if (format_bits == 32) {
for (int i = 0; i < frames * format_channels; i++) {
//32 bit IEEE Float
data.write[i] = file->get_float();
}
} else if (format_bits == 64) {
for (int i = 0; i < frames * format_channels; i++) {
//64 bit IEEE Float
data.write[i] = file->get_double();
}
}
}
// This is commented out due to some weird edge case seemingly in FileAccessMemory, doesn't seem to have any side effects though.
// if (file->eof_reached()) {
// ERR_FAIL_V_MSG(Ref<AudioStreamWAV>(), "Premature end of file.");
// }
}
if (import_loop_mode == 0 && chunk_id[0] == 's' && chunk_id[1] == 'm' && chunk_id[2] == 'p' && chunk_id[3] == 'l') {
// Loop point info!
/**
* Consider exploring next document:
* http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/RIFFNEW.pdf
* Especially on page:
* 16 - 17
* Timestamp:
* 22:38 06.07.2017 GMT
**/
for (int i = 0; i < 10; i++) {
file->get_32(); // i wish to know why should i do this... no doc!
}
// only read 0x00 (loop forward), 0x01 (loop ping-pong) and 0x02 (loop backward)
// Skip anything else because it's not supported, reserved for future uses or sampler specific
// from https://sites.google.com/site/musicgapi/technical-documents/wav-file-format#smpl (loop type values table)
int loop_type = file->get_32();
if (loop_type == 0x00 || loop_type == 0x01 || loop_type == 0x02) {
if (loop_type == 0x00) {
loop_mode = AudioStreamWAV::LOOP_FORWARD;
} else if (loop_type == 0x01) {
loop_mode = AudioStreamWAV::LOOP_PINGPONG;
} else if (loop_type == 0x02) {
loop_mode = AudioStreamWAV::LOOP_BACKWARD;
}
loop_begin = file->get_32();
loop_end = file->get_32();
}
}
// Move to the start of the next chunk. Note that RIFF requires a padding byte for odd
// chunk sizes.
file->seek(file_pos + chunksize + (chunksize & 1));
}
// STEP 2, APPLY CONVERSIONS
bool is16 = format_bits != 8;
int rate = format_freq;
/*
print_line("Input Sample: ");
print_line("\tframes: " + itos(frames));
print_line("\tformat_channels: " + itos(format_channels));
print_line("\t16bits: " + itos(is16));
print_line("\trate: " + itos(rate));
print_line("\tloop: " + itos(loop));
print_line("\tloop begin: " + itos(loop_begin));
print_line("\tloop end: " + itos(loop_end));
*/
//apply frequency limit
bool limit_rate = p_options["force/max_rate"];
int limit_rate_hz = p_options["force/max_rate_hz"];
if (limit_rate && rate > limit_rate_hz && rate > 0 && frames > 0) {
// resample!
int new_data_frames = (int)(frames * (float)limit_rate_hz / (float)rate);
Vector<float> new_data;
new_data.resize(new_data_frames * format_channels);
for (int c = 0; c < format_channels; c++) {
float frac = 0.0;
int ipos = 0;
for (int i = 0; i < new_data_frames; i++) {
// Cubic interpolation should be enough.
float y0 = data[MAX(0, ipos - 1) * format_channels + c];
float y1 = data[ipos * format_channels + c];
float y2 = data[MIN(frames - 1, ipos + 1) * format_channels + c];
float y3 = data[MIN(frames - 1, ipos + 2) * format_channels + c];
new_data.write[i * format_channels + c] = Math::cubic_interpolate(y1, y2, y0, y3, frac);
// update position and always keep fractional part within ]0...1]
// in order to avoid 32bit floating point precision errors
frac += (float)rate / (float)limit_rate_hz;
int tpos = (int)Math::floor(frac);
ipos += tpos;
frac -= tpos;
}
}
if (loop_mode) {
loop_begin = (int)(loop_begin * (float)new_data_frames / (float)frames);
loop_end = (int)(loop_end * (float)new_data_frames / (float)frames);
}
data = new_data;
rate = limit_rate_hz;
frames = new_data_frames;
}
bool normalize = p_options["edit/normalize"];
if (normalize) {
float max = 0.0;
for (int i = 0; i < data.size(); i++) {
float amp = Math::abs(data[i]);
if (amp > max) {
max = amp;
}
}
if (max > 0) {
float mult = 1.0 / max;
for (int i = 0; i < data.size(); i++) {
data.write[i] *= mult;
}
}
}
bool trim = p_options["edit/trim"];
if (trim && (loop_mode == AudioStreamWAV::LOOP_DISABLED) && format_channels > 0) {
int first = 0;
int last = (frames / format_channels) - 1;
bool found = false;
float limit = Math::db_to_linear(TRIM_DB_LIMIT);
for (int i = 0; i < data.size() / format_channels; i++) {
float amp_channel_sum = 0.0;
for (int j = 0; j < format_channels; j++) {
amp_channel_sum += Math::abs(data[(i * format_channels) + j]);
}
float amp = Math::abs(amp_channel_sum / (float)format_channels);
if (!found && amp > limit) {
first = i;
found = true;
}
if (found && amp > limit) {
last = i;
}
}
if (first < last) {
Vector<float> new_data;
new_data.resize((last - first) * format_channels);
for (int i = first; i < last; i++) {
float fade_out_mult = 1.0;
if (last - i < TRIM_FADE_OUT_FRAMES) {
fade_out_mult = ((float)(last - i - 1) / (float)TRIM_FADE_OUT_FRAMES);
}
for (int j = 0; j < format_channels; j++) {
new_data.write[((i - first) * format_channels) + j] = data[(i * format_channels) + j] * fade_out_mult;
}
}
data = new_data;
frames = data.size() / format_channels;
}
}
if (import_loop_mode >= 2) {
loop_mode = (AudioStreamWAV::LoopMode)(import_loop_mode - 1);
loop_begin = p_options["edit/loop_begin"];
loop_end = p_options["edit/loop_end"];
// Wrap around to max frames, so `-1` can be used to select the end, etc.
if (loop_begin < 0) {
loop_begin = CLAMP(loop_begin + frames, 0, frames - 1);
}
if (loop_end < 0) {
loop_end = CLAMP(loop_end + frames, 0, frames - 1);
}
}
int compression = p_options["compress/mode"];
bool force_mono = p_options["force/mono"];
if (force_mono && format_channels == 2) {
Vector<float> new_data;
new_data.resize(data.size() / 2);
for (int i = 0; i < frames; i++) {
new_data.write[i] = (data[i * 2 + 0] + data[i * 2 + 1]) / 2.0;
}
data = new_data;
format_channels = 1;
}
bool force_8_bit = p_options["force/8_bit"];
if (force_8_bit) {
is16 = false;
}
Vector<uint8_t> pcm_data;
AudioStreamWAV::Format dst_format;
if (compression == 1) {
dst_format = AudioStreamWAV::FORMAT_IMA_ADPCM;
if (format_channels == 1) {
_compress_ima_adpcm(data, pcm_data);
} else {
//byte interleave
Vector<float> left;
Vector<float> right;
int tframes = data.size() / 2;
left.resize(tframes);
right.resize(tframes);
for (int i = 0; i < tframes; i++) {
left.write[i] = data[i * 2 + 0];
right.write[i] = data[i * 2 + 1];
}
Vector<uint8_t> bleft;
Vector<uint8_t> bright;
_compress_ima_adpcm(left, bleft);
_compress_ima_adpcm(right, bright);
int dl = bleft.size();
pcm_data.resize(dl * 2);
uint8_t *w = pcm_data.ptrw();
const uint8_t *rl = bleft.ptr();
const uint8_t *rr = bright.ptr();
for (int i = 0; i < dl; i++) {
w[i * 2 + 0] = rl[i];
w[i * 2 + 1] = rr[i];
}
}
} else {
dst_format = is16 ? AudioStreamWAV::FORMAT_16_BITS : AudioStreamWAV::FORMAT_8_BITS;
bool enforce16 = is16 || compression == 2;
pcm_data.resize(data.size() * (enforce16 ? 2 : 1));
{
uint8_t *w = pcm_data.ptrw();
int ds = data.size();
for (int i = 0; i < ds; i++) {
if (enforce16) {
int16_t v = CLAMP(data[i] * 32768, -32768, 32767);
encode_uint16(v, &w[i * 2]);
} else {
int8_t v = CLAMP(data[i] * 128, -128, 127);
w[i] = v;
}
}
}
}
Vector<uint8_t> dst_data;
if (compression == 2) {
dst_format = AudioStreamWAV::FORMAT_QOA;
qoa_desc desc = {};
uint32_t qoa_len = 0;
desc.samplerate = rate;
desc.samples = frames;
desc.channels = format_channels;
void *encoded = qoa_encode((short *)pcm_data.ptr(), &desc, &qoa_len);
if (encoded) {
dst_data.resize(qoa_len);
memcpy(dst_data.ptrw(), encoded, qoa_len);
QOA_FREE(encoded);
}
} else {
dst_data = pcm_data;
}
Ref<AudioStreamWAV> sample;
sample.instantiate();
sample->set_data(dst_data);
sample->set_format(dst_format);
sample->set_mix_rate(rate);
sample->set_loop_mode(loop_mode);
sample->set_loop_begin(loop_begin);
sample->set_loop_end(loop_end);
sample->set_stereo(format_channels == 2);
return sample;
}
Ref<AudioStreamWAV> AudioStreamWAV::load_from_file(const String &p_path, const Dictionary &p_options) {
Vector<uint8_t> file_data = FileAccess::get_file_as_bytes(p_path);
ERR_FAIL_COND_V_MSG(file_data.is_empty(), Ref<AudioStreamWAV>(), vformat("Cannot open file '%s'.", p_path));
return load_from_buffer(file_data, p_options);
}
AudioStreamWAV::AudioStreamWAV() {}
AudioStreamWAV::~AudioStreamWAV() {}

View File

@@ -144,6 +144,9 @@ protected:
static void _bind_methods();
public:
static Ref<AudioStreamWAV> load_from_file(const String &p_path, const Dictionary &p_options);
static Ref<AudioStreamWAV> load_from_buffer(const Vector<uint8_t> &p_file_data, const Dictionary &p_options);
void set_format(Format p_format);
Format get_format() const;
@@ -179,6 +182,97 @@ public:
}
virtual Ref<AudioSample> generate_sample() const override;
static void _compress_ima_adpcm(const Vector<float> &p_data, Vector<uint8_t> &r_dst_data) {
static const int16_t _ima_adpcm_step_table[89] = {
7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
static const int8_t _ima_adpcm_index_table[16] = {
-1, -1, -1, -1, 2, 4, 6, 8,
-1, -1, -1, -1, 2, 4, 6, 8
};
int datalen = p_data.size();
int datamax = datalen;
if (datalen & 1) {
datalen++;
}
r_dst_data.resize(datalen / 2 + 4);
uint8_t *w = r_dst_data.ptrw();
int i, step_idx = 0, prev = 0;
uint8_t *out = w;
const float *in = p_data.ptr();
// Initial value is zero.
*(out++) = 0;
*(out++) = 0;
// Table index initial value.
*(out++) = 0;
// Unused.
*(out++) = 0;
for (i = 0; i < datalen; i++) {
int step, diff, vpdiff, mask;
uint8_t nibble;
int16_t xm_sample;
if (i >= datamax) {
xm_sample = 0;
} else {
xm_sample = CLAMP(in[i] * 32767.0, -32768, 32767);
}
diff = (int)xm_sample - prev;
nibble = 0;
step = _ima_adpcm_step_table[step_idx];
vpdiff = step >> 3;
if (diff < 0) {
nibble = 8;
diff = -diff;
}
mask = 4;
while (mask) {
if (diff >= step) {
nibble |= mask;
diff -= step;
vpdiff += step;
}
step >>= 1;
mask >>= 1;
}
if (nibble & 8) {
prev -= vpdiff;
} else {
prev += vpdiff;
}
prev = CLAMP(prev, -32768, 32767);
step_idx += _ima_adpcm_index_table[nibble];
step_idx = CLAMP(step_idx, 0, 88);
if (i & 1) {
*out |= nibble << 4;
out++;
} else {
*out = nibble;
}
}
}
AudioStreamWAV();
~AudioStreamWAV();
};

View File

@@ -30,11 +30,6 @@
#include "audio_effect_record.h"
#ifdef TOOLS_ENABLED
// FIXME: This file shouldn't depend on editor stuff.
#include "editor/import/resource_importer_wav.h"
#endif
void AudioEffectRecordInstance::process(const AudioFrame *p_src_frames, AudioFrame *p_dst_frames, int p_frame_count) {
if (!is_recording) {
for (int i = 0; i < p_frame_count; i++) {
@@ -241,12 +236,8 @@ Ref<AudioStreamWAV> AudioEffectRecord::get_recording() const {
Vector<uint8_t> bleft;
Vector<uint8_t> bright;
#ifdef TOOLS_ENABLED
ResourceImporterWAV::_compress_ima_adpcm(left, bleft);
ResourceImporterWAV::_compress_ima_adpcm(right, bright);
#else
ERR_PRINT("AudioEffectRecord cannot do IMA ADPCM compression at runtime.");
#endif
AudioStreamWAV::_compress_ima_adpcm(left, bleft);
AudioStreamWAV::_compress_ima_adpcm(right, bright);
int dl = bleft.size();
dst_data.resize(dl * 2);
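With AudioStreamWAV::_compress_ima_adpcm available outside the editor, this last hunk drops the TOOLS_ENABLED guard, so AudioEffectRecord can produce IMA ADPCM recordings in exported projects as well. A minimal GDScript sketch of the resulting workflow, assuming a bus named "Record" carries an AudioEffectRecord as its first effect and the script's node has an AudioStreamPlayer child (both names are illustrative):

# Assumed setup: audio bus "Record" with an AudioEffectRecord at effect index 0.
var record := AudioServer.get_bus_effect(AudioServer.get_bus_index("Record"), 0) as AudioEffectRecord
record.set_format(AudioStreamWAV.FORMAT_IMA_ADPCM) # Now also works at runtime, not just in the editor.
record.set_recording_active(true)
await get_tree().create_timer(2.0).timeout
record.set_recording_active(false)
# Play back the IMA ADPCM-compressed recording.
$AudioStreamPlayer.stream = record.get_recording()
$AudioStreamPlayer.play()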