Best way to make an Expiration Timer for Temporary Objects

Godot Version

4.4 Release

Question

I’ve broken all of my sound-related functionality into a Sound autoload. All my other objects do is call it and it plays the appropriate sound on the appropriate Channel.

One of the reasons for this is I wanted to be able to deal with executing multiple sound effects, like all the sounds of battle. Each one fires and forgets. So I create a temporary sound AudioStreamPlayer that only needs to exist for the length of the sound. Then I want that AudioStreamPlayer to be deleted from memory. (This is because I ended up with a memory leak when I didn’t do that.)

My solution was to come up with a LifetimeTimer helper object. It takes as an initialization parameter the node it is supposed to delete.

class_name LifetimeTimer extends Timer


@onready var item


func _init(item_to_free: Variant):
	item = item_to_free


func _ready() -> void:
	self.timeout.connect(die)


func die():
	item.queue_free()
	queue_free()

I then call it using this code:

func play(sound: AudioStream, channel: CHANNEL):
	if sound == null:
			return
	var player = AudioStreamPlayer.new()
	Engine.get_main_loop().current_scene.add_child(player)
	player.set_bus(channel_to_string(channel))
	player.set_stream(sound)
	player.play()

	var timer = LifetimeTimer.new(player)
	timer.wait_time = sound.get_length()
	Engine.get_main_loop().current_scene.add_child(timer)
	timer.start()

My question is this: Is there a simpler way to handle this? I wrote this code a year and I am now refactoring it. I feel like there’s a better way to handle this, butI don’t think it’s using await. I am open to suggestions about how to get rid of the need for the LifetimeTimer object. I am even open to suggestions about not using temporary AudioStreamPlayers.

1 Like

You could use a AudioStreamPolyphonic on a single AudioStreamPlayer to avoid adding anything extra to the scene tree.

var polyphone_player: AudioStreamPlayer
var polyphonic: AudioStreamPlaybackPolyphonic

func _ready() -> void:
	polyphone_player = $AudioStreamPlayer
	polyphonic = polyphone_player.get_stream_playback()


func play(sound: AudioStream, channel: CHANNEL) -> void:
	polyphonic.play_stream(sound, 0.0, 0.0, 1.0, 1, channel_to_string(channel))
3 Likes

What I’m doing with my setup is I have a global script PlaySound.gd:

@onready var snd_gui_select = _setup_stream("GUI", 1, 1.2, preload("res://Assets/Sounds/GUISelect.wav"))
#[...other gui sounds...]
@onready var snd_menu_music = _setup_music_stream(preload("res://Assets/Sounds/MenuMusic.wav"))
#[...other music...]
@onready var snd_explosion = _setup_stream("Game", 8, 1.6, preload("res://Assets/Sounds/Explosion.wav"))
#[...other game fx...]

func _setup_stream(bus: String, polyphony: int, variance: int, res: Resource) -> AudioStreamPlayer:
    var asr = AudioStreamRandomizer.new()
    var plyr = AudioStreamPlayer.new()
    plyr.stream = asr # Randomizes pitch
    plyr.bus = bus # We have separate GUI, Game & Music busses
    plyr.max_polyphony = polyphony
    asr.add_stream(0, res)
    asr.random_pitch = variance
    add_child(plyr) # Global
    return plyr

func _setup_music_stream(res: Resource) -> AudioStreamPlayer:
    var plyr = AudioStreamPlayer.new()
    plyr.stream = res
    plyr.bus = "Music"
    add_child(plyr)
    return plyr

#[...]

func menu_music():
    snd_menu_music.play()

func explosion():
    snd_explosion.play()

#[...]

Since it’s global, I can call PlaySound.explosion() or PlaySound.menu_music() from anywhere, and it works. The polyphony lets the player handle more than one sound at a time. The variance randomizes the pitch a bit.

2 Likes

@gertkeno That’s helpful thanks. I’m going to look into that.

##sound.gd Autoload Module
extends Node


signal now_playing(song: Song)
signal add_song_to_music_playlist(song: Song)


enum CHANNEL {
	Master,
	Music,
	SFX,
	UI,
	Ambient,
	Dialogue,
}


@export var default_button_pressed_sound: AudioStream
@export var volume_confirm_sound: AudioStream


@onready var music_player: AudioStreamPlayer = $MusicPlayer
@onready var dialogue_player: AudioStreamPlayer = $DialoguePlayer


func play_music(sound: Variant):
	if sound == AudioStream:
		var temp_sound = Song.new()
		temp_sound.song = sound
		sound = temp_sound
		sound.title = sound.song.resource_name
		sound.album = "Unknown"
	if sound is not Song:
		push_error("%s not a valid song file or AudioStream" % [sound.name])
	if sound.song == null:
		push_error("%s song is empty. No AudioStream assigned." % [sound.resource_path])
	
	print_rich("Song Playing: %s\nby %s" % [sound.title, sound.artist])
	print_rich("Album: %s\nAlbum Link %s" % [sound.album, sound.album_link])
	music_player.set_stream(sound.song)
	music_player.play()
	now_playing.emit(sound)


func pause_music():
	music_player.stream_paused = true


func unpause_music():
	music_player.stream_paused = false


func play_sound_effect(sound: AudioStream):
	play(sound, CHANNEL.SFX)


func play_ui_sound(sound: AudioStream):
	play(sound, CHANNEL.UI)


func play_button_pressed_sound():
	play(default_button_pressed_sound, CHANNEL.UI)


func play_ambient_sound(sound: AudioStream):
	play(sound, CHANNEL.Ambient)


func play_dialogue(sound: AudioStream):
	dialogue_player.set_stream(sound)
	dialogue_player.play()

func play(sound: AudioStream, channel: CHANNEL):
	if sound == null:
			return
	var player = AudioStreamPlayer.new()
	Engine.get_main_loop().current_scene.add_child(player)
	player.set_bus(channel_to_string(channel))
	player.set_stream(sound)
	player.play()

	var timer = LifetimeTimer.new(player)
	timer.wait_time = sound.get_length()
	Engine.get_main_loop().current_scene.add_child(timer)
	timer.start()


func channel_to_string(channel: CHANNEL):
	return CHANNEL.find_key(channel)
## song.gd Resource Definition
@icon("res://addons/dragonforge_dev_sound/assets/icons/music-library.png")
extends Resource

class_name Song

@export var song: AudioStream
@export var title: String
@export var artist: String
@export var album: String
@export var album_link: String

I have a separate AudioStreamPlayer for the music so that the sound effects will never cut off the music. That’s also why I’m using separate AudioStreamPlayers for the SFX though - because when I tried to have multiple polyphony they were getting cut off. Glad I posted more of what I was doing. I’d love to keep the number of nodes being created and deleted down.

1 Like

@hexgrid Thanks for another example of using the polyphony. It’s helpful. I do not want to couple the game’s sound effects or music to the Sound Autoload however because I use it for game jams and other games. I just want to centralize the sound stuff for things like handling volume controls, etc.

I’m curious about the pitch randomizer. Can you explain more about that? Is it a way to use less sound files? I’m interested in your thinking on that.

1 Like

When you have the same sound repeating a bunch (whether it’s footsteps, raindrops, explosions…) if you play them all at identical pitch, you get a couple of undesirable effects:

  • the sound is (literally) monotonous, it’s the exact same thing over and over, and most people find that flat and at least mildly offputting
  • if the sounds are placed close together in time you can get flanging/phasing effects from the sounds interfering with each other

The easy fix for this is to randomize the pitch slightly when playing the sounds; the difference sounds more natural, and reduces the beat frequency-based interference that causes the flanging.

I’m using at least a little pitch variance on every sound I’m playing except the music. There are some sounds (dialogue) that I wouldn’t pitch vary, but interface and game sound effects IMO sound significantly better with variance.

2 Likes

@hexgrid That’s REALLY helpful info, thank you! I’m going to add that in and test it. So even with like game menu clicks you use it?

I definitely use it with menu sounds. It’s particularly noticeable there.

2 Likes

Awesome, thanks. Trying to get the polyphonic part working now, then I’m going to add that.

@gertkeno and @hexgrid I ended up using a combination of both your solutions. However sadly, I couldn’t figure out how to combine them.

This is the method for playing sounds using the same AudioStreamPlayer with multiple channels:

func play(sound: AudioStream, channel: CHANNEL):
	if sound == null:
			return
	var channel_name = channel_to_string(channel)
	var channel_index = _channel_to_bus_index(channel)
	var volume = db_to_linear(AudioServer.get_bus_volume_db(channel_index))
	sound_playback.play_stream(sound, 
								0.0,
								volume,
								1.0,
								AudioServer.PLAYBACK_TYPE_DEFAULT,
								channel_name
	)

This is for the UI sounds to get some variety on the clicks:

func play_ui_sound(sound: AudioStream):
	var randomizer: AudioStreamRandomizer = ui_sound_player.stream
	for stream in randomizer.streams_count:
		randomizer.remove_stream(stream)
	randomizer.add_stream(0, sound)
	ui_sound_player.play()

I could not for the life of me figure out how to combine the two, because the Stream drop-down requires you to pick one or the other.

I was curious if either of you had any thoughts on how to combine them. The full plugin project code can be found here: GitHub - dragonforge-dev/dragonforge-sound

The polyphonic playback supports a pitch shift parameter, you could give it a random range on each call to give the same randomized effect.

By assigning the play stream to a specific channel it will have that channel’s mixed volume, you may be doubling the volume effect by assigning the stream it’s own channel volume.

sound_playback.play_stream(sound, 
							0.0,
							0.0, # volume, keep 0
							randf_range(1.0, 1.15), # pitch scale
							AudioServer.PLAYBACK_TYPE_DEFAULT,
							channel_name
)

Perfect! Very helpful. Thank you! I’m feeling like I should’ve RTFM on the play_stream() function now.

I’ve been doing it in code, but the randomizer produces a stream. AudioStreamPlayer takes a stream, so rather than directly giving it the resource, you can give the resource to a randomizer and assign the randomizer as the stream for the player.

I bet if you look in the Streams section of your second panel there…

1 Like

I am starting to question my strategy for sound; I’ve just started looking in to making some sounds positional, and it looks like doing that needs an AudioStreamPlayer2D or 3D per sound source, so polyphony is suddenly a lot less useful.

Yeah I’ve realized the same thing. But since I’m not doing a multiplayer game I can do sound effects like mining, fighting, etc centrally.

I found out that AudioStreamPlaylist and AudioStreamSynchronized are intended to be able to be embedded in one another. They’re clearly designed for music, but can also be used for sound effects. Like you’ve got a crafting animation and you need three tinks on the anvil, you can put three sound files in a playlist, and play it in the same order every time or a random order. One of those streams can be an AudioStreamRandomizer or they can all be wrapped in one.

Yesterday I worked on creating a SoundEffect resource. That’s when I found all of this. I created tests of each and then realized the power. The one thing that I couldn’t get out of an AudioStreamPlaylist object was being able to pick a single sound from the list to play each time play is called. This is where I am:

@icon("res://addons/dragonforge_sound/assets/icons/sound-effect.svg")
class_name SoundEffect extends Resource


## An AudioStream for the sound effect(s).
##
## AudioStreamMP3 - Used for a single .mp3 sound effect.
## AudioStreamOggVorbis - Used for a single .ogg sound effect.
## AudioStreamWAV - Used for a single .wav sound effect.
## AudioStreamPlaylist - Used for multiple sound effects to be triggered in a
## sequence. If play_only_one_sound (below) is true, only one sound in the list
## will be played each time.
## AudioStreamSynchronized - Used to play a bunch of separate sound files
## together.
@export var stream: AudioStream
## How much pitch variance should be in the sound effect(s) played.
## Default is 1.0 (None). Increasing this number increases variation.
## Recommended variance is between 1.05 and 1.2.
@export var pitch_variance: int = 1.0
## If true and stream is of type AudioStreamPlaylist, only one sound will be 
## picked to be played every time play is called. If shuffle = true for the
## AudioStreamPlaylist then the sound will be rqandomly selected, otherwise
## the next sound on the list will be played.
@export var play_only_one_sound: bool = false
## Human readable name for the sound effect.
@export var title: String
## The name of the sound effect's creator.
@export var artist: String
## The project name.
@export var project: String
## A url link to the project.
@export var project_link: String


var iterator: int = 0


func play(channel: Sound.CHANNEL = Sound.CHANNEL.SFX) -> int:
	if stream is AudioStreamPlaylist and play_only_one_sound == true:
		var playlist: AudioStreamPlaylist
		if playlist.shuffle == true:
			return Sound.play(_get_random_sound(playlist), channel, pitch_variance)
		else:
			return Sound.play(_get_next_sound(playlist), channel, pitch_variance)
			
	return Sound.play(stream, channel, pitch_variance)


func _get_next_sound(playlist: AudioStreamPlaylist) -> AudioStream:
	var sound: AudioStream = playlist.get_list_stream(iterator)
	iterator += 1
	if iterator >= playlist.stream_count:
		iterator = 0
	return sound


func _get_random_sound(playlist: AudioStreamPlaylist) -> AudioStream:
	var random_index: int = randi_range(0, playlist.stream_count)
	return playlist.get_list_stream(random_index)

Now I’m thinking that both my Song resource and my SoundEffect resource would be better off inheriting from AudioStream themselves. The primary purpose of both is to store metadata for sound files because Godot doesn’t pull metadata yet. (I want to be able to track later where I pulled something from if I want to go back into my archives for more.) I’m starting to think I would’ve spent less time on this at this point if I’d just dove into the Godot feature request and fixed the metadata issue there.

P.S. Please don’t do positional sound for dialogue. It’s so freaking annoying when games do this. Hogwarts Legacy and Quantum Break both did this and it’s REALLY annoying to have to constantly run back to listen to a companions dialog, or turn in circles to catch their voice bouncing off the environment so you can hear story dialogue.

My game doesn’t have dialogue, it’s all shots, explosions, and so forth, so positional is kind of important.

2 Likes