Handle errors from AudioRenderThread::start (#424)
* Handle errors from AudioRenderThread::start

Signed-off-by: Taym <[email protected]>

* Return Result from create_audio_context

Signed-off-by: Taym <[email protected]>

* Update examples

Signed-off-by: Taym <[email protected]>

* Improve error handling

Signed-off-by: Taym <[email protected]>

* Use try_recv for result_receiver

Signed-off-by: Taym <[email protected]>

* Revert "Use try_recv for result_receiver"

This reverts commit 5c16fdf.

Signed-off-by: Taym <[email protected]>

* Refactor thread initialization to prevent blocking and resolve test timeouts.

Signed-off-by: Taym <[email protected]>

---------

Signed-off-by: Taym <[email protected]>
Taym95 authored Aug 18, 2024
1 parent f6e6ee0 commit ed1d4c7
Showing 8 changed files with 68 additions and 28 deletions.
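The heart of the change is a small initialization handshake: AudioContext::new now spawns the render thread, waits on an mpsc channel for the thread to report whether its audio sink could be created, and returns Result<Self, AudioSinkError> accordingly, so a sink failure surfaces to the caller instead of panicking inside the render thread. The sketch below is a standalone, standard-library-only illustration of that pattern; SinkError, RenderState, prepare, and start_render_thread are hypothetical stand-ins, not the crate's real items.

use std::sync::mpsc;
use std::thread::Builder;

// Hypothetical stand-ins for AudioSinkError and the render thread's state.
#[derive(Debug)]
struct SinkError(String);
struct RenderState;

// Hypothetical stand-in for AudioRenderThread::prepare_thread.
fn prepare() -> Result<RenderState, SinkError> {
    Ok(RenderState)
}

fn start_render_thread() -> Result<(), SinkError> {
    let (init_sender, init_receiver) = mpsc::channel();

    Builder::new()
        .name("AudioRenderThread".to_owned())
        .spawn(move || {
            // Report the outcome of initialization before entering the
            // (omitted) event loop, so the spawning thread is always
            // unblocked, whether setup succeeded or failed.
            match prepare() {
                Ok(_state) => {
                    let _ = init_sender.send(Ok(()));
                    // ... run the event loop with _state here ...
                }
                Err(e) => {
                    let _ = init_sender.send(Err(e));
                }
            }
        })
        .expect("Failed to spawn AudioRenderThread");

    // Block until the render thread reports, then propagate its result.
    init_receiver
        .recv()
        .expect("Failed to receive result from AudioRenderThread")
}

fn main() {
    match start_render_thread() {
        Ok(()) => println!("render thread started"),
        Err(e) => eprintln!("could not start render thread: {:?}", e),
    }
}

Sending the result before the event loop starts is what keeps the spawning thread from waiting forever, which appears to be what the "prevent blocking and resolve test timeouts" bullet in the commit message refers to.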
30 changes: 25 additions & 5 deletions audio/context.rs
@@ -10,6 +10,8 @@ use std::sync::{Arc, Mutex};
use std::thread::Builder;
use AudioBackend;

use crate::sink::AudioSinkError;

/// Describes the state of the audio context on the control thread.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ProcessingState {
@@ -130,7 +132,7 @@ impl AudioContext {
client_context_id: &ClientContextId,
backend_chan: Arc<Mutex<Sender<BackendMsg>>>,
options: AudioContextOptions,
) -> Self {
) -> Result<Self, AudioSinkError> {
let (sample_rate, channels) = match options {
AudioContextOptions::RealTimeAudioContext(ref options) => (options.sample_rate, 2),
AudioContextOptions::OfflineAudioContext(ref options) => {
@@ -143,13 +145,31 @@
let graph = AudioGraph::new(channels);
let dest_node = graph.dest_id();
let listener = graph.listener_id();

let (init_sender, init_receiver) = mpsc::channel();
Builder::new()
.name("AudioRenderThread".to_owned())
.spawn(move || {
AudioRenderThread::start::<B>(receiver, sender_, sample_rate, graph, options);
AudioRenderThread::start::<B>(
receiver,
sender_,
sample_rate,
graph,
options,
init_sender,
)
})
.unwrap();
Self {
.expect("Failed to spawn AudioRenderThread");

let init_thread_result = init_receiver
.recv()
.expect("Failed to receive result from AudioRenderThread");

if let Err(e) = init_thread_result {
return Err(e);
}

Ok(Self {
id,
client_context_id: *client_context_id,
backend_chan,
@@ -159,7 +179,7 @@
dest_node,
listener,
make_decoder: Arc::new(|| B::make_decoder()),
}
})
}

pub fn state(&self) -> ProcessingState {
17 changes: 14 additions & 3 deletions audio/render_thread.rs
@@ -160,10 +160,21 @@ impl AudioRenderThread {
sample_rate: f32,
graph: AudioGraph,
options: AudioContextOptions,
init_sender: Sender<Result<(), AudioSinkError>>,
) {
let mut thread = Self::prepare_thread::<B>(sender.clone(), sample_rate, graph, options)
.expect("Could not start audio render thread");
thread.event_loop(event_queue)
let mut thread =
match Self::prepare_thread::<B>(sender.clone(), sample_rate, graph, options) {
Ok(thread) => {
let _ = init_sender.send(Ok(()));
thread
}
Err(e) => {
let _ = init_sender.send(Err(e));
return;
}
};

thread.event_loop(event_queue);
}

make_render_thread_state_change!(resume, Running, play);
16 changes: 11 additions & 5 deletions backends/dummy/lib.rs
@@ -94,15 +94,15 @@ impl Backend for DummyBackend {
&self,
_id: &ClientContextId,
options: AudioContextOptions,
) -> Arc<Mutex<AudioContext>> {
) -> Result<Arc<Mutex<AudioContext>>, AudioSinkError> {
let (sender, _) = mpsc::channel();
let sender = Arc::new(Mutex::new(sender));
Arc::new(Mutex::new(AudioContext::new::<Self>(
Ok(Arc::new(Mutex::new(AudioContext::new::<Self>(
0,
&ClientContextId::build(1, 1),
sender,
options,
)))
)?)))
}

fn create_webrtc(&self, signaller: Box<dyn WebRtcSignaller>) -> WebRtcController {
@@ -300,10 +300,16 @@ impl WebRtcControllerBackend for DummyWebRtcController {
fn add_ice_candidate(&mut self, _: IceCandidate) -> WebRtcResult {
Ok(())
}
fn create_offer(&mut self, _: Box<dyn FnOnce(SessionDescription) + Send + 'static>) -> WebRtcResult {
fn create_offer(
&mut self,
_: Box<dyn FnOnce(SessionDescription) + Send + 'static>,
) -> WebRtcResult {
Ok(())
}
fn create_answer(&mut self, _: Box<dyn FnOnce(SessionDescription) + Send + 'static>) -> WebRtcResult {
fn create_answer(
&mut self,
_: Box<dyn FnOnce(SessionDescription) + Send + 'static>,
) -> WebRtcResult {
Ok(())
}
fn add_stream(&mut self, _: &MediaStreamId) -> WebRtcResult {
18 changes: 9 additions & 9 deletions backends/gstreamer/lib.rs
@@ -184,18 +184,18 @@ impl Backend for GStreamerBackend {
&self,
client_context_id: &ClientContextId,
options: AudioContextOptions,
) -> Arc<Mutex<AudioContext>> {
) -> Result<Arc<Mutex<AudioContext>>, AudioSinkError> {
let id = self.next_instance_id.fetch_add(1, Ordering::Relaxed);
let context = Arc::new(Mutex::new(AudioContext::new::<Self>(
id,
client_context_id,
self.backend_chan.clone(),
options,
)));
let audio_context =
AudioContext::new::<Self>(id, client_context_id, self.backend_chan.clone(), options)?;

let audio_context = Arc::new(Mutex::new(audio_context));

let mut instances = self.instances.lock().unwrap();
let entry = instances.entry(*client_context_id).or_insert(Vec::new());
entry.push((id, Arc::downgrade(&context).clone()));
context
entry.push((id, Arc::downgrade(&audio_context).clone()));

Ok(audio_context)
}

fn create_webrtc(&self, signaller: Box<dyn WebRtcSignaller>) -> WebRtcController {
4 changes: 2 additions & 2 deletions examples/muted_audiocontext.rs
@@ -13,7 +13,7 @@ fn run_example(servo_media: Arc<ServoMedia>) {
let context_id1 = &ClientContextId::build(1, 1);
let context1 = servo_media.create_audio_context(&context_id1, Default::default());
{
let context = context1.lock().unwrap();
let context = context1.unwrap().lock().unwrap();
let dest = context.dest_node();
let options = OscillatorNodeOptions::default();
let osc1 = context.create_node(
@@ -33,7 +33,7 @@
{
let mut options = OscillatorNodeOptions::default();
options.oscillator_type = Sawtooth;
let context = context2.lock().unwrap();
let context = context2.unwrap().lock().unwrap();
let dest = context.dest_node();
let osc3 = context.create_node(
AudioNodeInit::OscillatorNode(options.clone()),
2 changes: 1 addition & 1 deletion examples/params_connect2.rs
@@ -12,7 +12,7 @@ use std::{thread, time};
fn run_example(servo_media: Arc<ServoMedia>) {
let context =
servo_media.create_audio_context(&ClientContextId::build(1, 1), Default::default());
let context = context.lock().unwrap();
let context = context.unwrap().lock().unwrap();
let mut options = OscillatorNodeOptions::default();
options.freq = 2.0;
let lfo = context.create_node(AudioNodeInit::OscillatorNode(options), Default::default());
2 changes: 1 addition & 1 deletion examples/stream_reader_node.rs
@@ -10,7 +10,7 @@ fn run_example(servo_media: Arc<ServoMedia>) {
let context =
servo_media.create_audio_context(&ClientContextId::build(1, 1), Default::default());
let input = servo_media.create_audiostream();
let context = context.lock().unwrap();
let context = context.unwrap().lock().unwrap();
let dest = context.dest_node();
let osc1 = context.create_node(
AudioNodeInit::MediaStreamSourceNode(input),
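The example programs now simply unwrap() the Result returned by create_audio_context, which is fine for demos. A caller that wants to keep running when audio is unavailable can match on the error instead; the following standalone sketch shows that caller-side handling, with CreateError, Context, and create_context as hypothetical stand-ins for the servo-media API.

use std::sync::{Arc, Mutex};

// Hypothetical stand-ins for AudioSinkError and AudioContext.
#[derive(Debug)]
enum CreateError {
    SinkNotAvailable,
}
struct Context;

// Hypothetical stand-in for servo_media.create_audio_context(...);
// here it pretends the platform audio sink is unavailable.
fn create_context() -> Result<Arc<Mutex<Context>>, CreateError> {
    Err(CreateError::SinkNotAvailable)
}

fn main() {
    match create_context() {
        Ok(context) => {
            // The examples collapse this into context.unwrap().lock().unwrap().
            let _context = context.lock().unwrap();
            // ... build the audio graph here ...
        }
        Err(e) => {
            // A real application can degrade gracefully instead of panicking.
            eprintln!("could not create an audio context: {:?}", e);
        }
    }
}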
7 changes: 5 additions & 2 deletions servo-media/lib.rs
@@ -12,7 +12,10 @@ use std::ops::Deref;
use std::sync::{Arc, Mutex};
use std::thread;

use audio::context::{AudioContext, AudioContextOptions};
use audio::{
context::{AudioContext, AudioContextOptions},
sink::AudioSinkError,
};
use player::audio::AudioRenderer;
use player::context::PlayerGLContext;
use player::ipc_channel::ipc::IpcSender;
@@ -57,7 +60,7 @@ pub trait Backend: Send + Sync {
&self,
id: &ClientContextId,
options: AudioContextOptions,
) -> Arc<Mutex<AudioContext>>;
) -> Result<Arc<Mutex<AudioContext>>, AudioSinkError>;
fn create_webrtc(&self, signaller: Box<dyn WebRtcSignaller>) -> WebRtcController;
fn can_play_type(&self, media_type: &str) -> SupportsMediaType;
fn set_capture_mocking(&self, _mock: bool) {}
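Because the signature change lands on the Backend trait itself, every implementor (the dummy and GStreamer backends above, plus any backend outside this repository) has to adopt the fallible form and propagate construction errors rather than panicking. The following standalone sketch shows that shape; MyBackend, Context, ContextOptions, ClientContextId, SinkError, and new_context are hypothetical stand-ins, not the real servo-media types.

use std::sync::{Arc, Mutex};

// Hypothetical stand-ins for the servo-media types involved.
#[derive(Debug)]
struct SinkError;
struct Context;
struct ContextOptions;
struct ClientContextId;

// Simplified shape of the updated trait method.
trait Backend {
    fn create_audio_context(
        &self,
        id: &ClientContextId,
        options: ContextOptions,
    ) -> Result<Arc<Mutex<Context>>, SinkError>;
}

// Hypothetical fallible constructor standing in for AudioContext::new.
fn new_context() -> Result<Context, SinkError> {
    Ok(Context)
}

struct MyBackend;

impl Backend for MyBackend {
    fn create_audio_context(
        &self,
        _id: &ClientContextId,
        _options: ContextOptions,
    ) -> Result<Arc<Mutex<Context>>, SinkError> {
        // Propagate failure with ? instead of unwrapping, mirroring what
        // the GStreamer backend now does.
        let context = new_context()?;
        Ok(Arc::new(Mutex::new(context)))
    }
}

fn main() {
    let backend = MyBackend;
    let _ = backend.create_audio_context(&ClientContextId, ContextOptions);
}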
