-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlive_audio.rs
More file actions
225 lines (205 loc) · 8.37 KB
/
live_audio.rs
File metadata and controls
225 lines (205 loc) · 8.37 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
use std::ops::{AddAssign, IndexMut};
use crate::audio_processing::playback::{PlaybackState, PlaybackStatus};
use crate::audio_processing::sample::SamplePlayer;
use crate::audio_processing::{Frame, Interpolation};
use crate::project::song::Song;
use crate::sample::Sample;
use crate::{OutputConfig, ToWorkerMsg};
use dasp::sample::ToSample;
use simple_left_right::Reader;
/// State owned by the realtime audio worker and driven from the audio
/// callback (see `get_typed_callback`).
///
/// `StreamData` is opaque per-callback data supplied by the stream backend
/// and forwarded to readers via `state_sender`.
pub(crate) struct LiveAudio<StreamData> {
    // Read side of a left-right pair; `lock()` yields the current `Song`
    // snapshot for one callback. NOTE(review): presumably wait-free — confirm
    // simple_left_right's guarantees for the realtime thread.
    song: Reader<Song>,
    // `Some` while song playback runs; cleared when playback finishes or on
    // `ToWorkerMsg::StopPlayback`.
    playback_state: Option<PlaybackState>,
    // Currently sounding preview note, if any (started by `PlayEvent`).
    live_note: Option<SamplePlayer>,
    // Consumer end of the lock-free control-message queue from the manager.
    manager: rtrb::Consumer<ToWorkerMsg>,
    // Publishes (playback status, stream data) back to non-realtime readers.
    state_sender: rt_write_lock::Writer<(Option<PlaybackStatus>, StreamData)>,
    config: OutputConfig,
    // Internal stereo mix buffer; allocated in `new` at 2x the configured
    // buffer size as headroom for larger-than-requested output buffers.
    buffer: Box<[Frame]>,
}
impl<S> LiveAudio<S> {
    /// Builds the worker and allocates its internal mix buffer (2x the
    /// configured buffer size — see the assert in `get_typed_callback`).
    /// Not realtime safe.
    pub fn new(
        song: Reader<Song>,
        manager: rtrb::Consumer<ToWorkerMsg>,
        state_sender: rt_write_lock::Writer<(Option<PlaybackStatus>, S)>,
        config: OutputConfig,
    ) -> Self {
        Self {
            song,
            playback_state: None,
            live_note: None,
            manager,
            state_sender,
            config,
            buffer: vec![Frame::default(); usize::try_from(config.buffer_size).unwrap() * 2].into(),
        }
    }

    /// Publishes the current playback status together with `stream_data`
    /// through the realtime-safe write lock for non-realtime readers.
    #[rtsan_standalone::nonblocking]
    fn send_state(&mut self, stream_data: S) {
        let playback_state = self.playback_state.as_ref().map(|s| s.get_status());
        let mut write_guard = self.state_sender.write();
        // make this more granular once the state includes AudioData or other allocated data
        *write_guard = (playback_state, stream_data);
    }

    #[rtsan_standalone::nonblocking]
    /// Drains pending control messages, then mixes the live note and/or song
    /// playback into `self.buffer[..len]`.
    ///
    /// returns true if work was done. When it returns false the internal
    /// buffer is NOT cleared and still holds data from the previous run.
    fn fill_internal_buffer(&mut self, len: usize) -> bool {
        // the output buffer should be smaller than the internal buffer
        let buffer = &mut self.buffer[..len];
        // song snapshot held for the duration of this callback
        let song = self.song.lock();

        // process manager events (drained even when there is no audio work,
        // so control messages are never left queued)
        while let Ok(event) = self.manager.pop() {
            match event {
                ToWorkerMsg::StopPlayback => self.playback_state = None,
                ToWorkerMsg::Playback(settings) => {
                    self.playback_state =
                        PlaybackState::new(&song, self.config.sample_rate, settings);
                }
                ToWorkerMsg::PlayEvent(note) => {
                    // only start a note if the referenced sample slot is occupied
                    if let Some(sample) = &song.samples[usize::from(note.sample_instr)] {
                        let sample_player = SamplePlayer::new(
                            Sample::clone(&sample.1),
                            sample.0,
                            // this at some point was divided by two, if i ever figure out why, maybe put it back
                            self.config.sample_rate,
                            note.note,
                        );
                        // replaces (cuts off) any previously sounding live note
                        self.live_note = Some(sample_player);
                    }
                }
                ToWorkerMsg::StopLiveNote => self.live_note = None,
                ToWorkerMsg::SetInterpolation(i) => self.config.interpolation = i,
            }
        }

        if self.live_note.is_none() && self.playback_state.is_none() {
            // no processing todo
            return false;
        }

        // clear buffer from past run
        // only happens if there is work todo
        buffer.fill(Frame::default());

        // process live_note
        if let Some(live_note) = &mut self.live_note {
            // monomorphised per interpolation mode so the per-sample loop
            // carries no runtime branch on the interpolation setting
            fn process_note<const INTERPOLATION: u8>(
                buffer: &mut [Frame],
                note: &mut SamplePlayer,
            ) {
                buffer
                    .iter_mut()
                    .zip(note.iter::<{ INTERPOLATION }>())
                    .for_each(|(buf, note)| buf.add_assign(note));
            }

            match self.config.interpolation {
                Interpolation::Nearest => {
                    process_note::<{ Interpolation::Nearest as u8 }>(buffer, live_note)
                }
                Interpolation::Linear => {
                    process_note::<{ Interpolation::Linear as u8 }>(buffer, live_note)
                }
                Interpolation::Quadratic => {
                    process_note::<{ Interpolation::Quadratic as u8 }>(buffer, live_note)
                }
            }

            // drop the note once it has played past its end
            if live_note.check_position().is_break() {
                self.live_note = None;
            }
        }

        // process song playback (additively mixed on top of the live note)
        if let Some(playback) = &mut self.playback_state {
            // same monomorphisation pattern as `process_note` above
            fn process_playback<const INTERPOLATION: u8>(
                buffer: &mut [Frame],
                playback: &mut PlaybackState,
                song: &Song,
            ) {
                buffer
                    .iter_mut()
                    .zip(playback.iter::<{ INTERPOLATION }>(song))
                    .for_each(|(buf, note)| buf.add_assign(note));
            }

            match self.config.interpolation {
                Interpolation::Nearest => {
                    process_playback::<{ Interpolation::Nearest as u8 }>(buffer, playback, &song)
                }
                Interpolation::Linear => {
                    process_playback::<{ Interpolation::Linear as u8 }>(buffer, playback, &song)
                }
                Interpolation::Quadratic => {
                    process_playback::<{ Interpolation::Quadratic as u8 }>(buffer, playback, &song)
                }
            }

            if playback.is_done() {
                self.playback_state = None;
            }
        }

        true
    }

    /// converts the internal buffer to any possible output format and channel count
    /// sums stereo to mono for single-channel output; for multi-channel
    /// output only the first two channels of each frame are written.
    ///
    /// NOTE(review): an earlier doc claimed channels 3 and up are filled with
    /// silence, but this code never writes them — stale backend data would
    /// pass through on those channels. Confirm the stream backend zeroes its
    /// output buffer, or zero-fill the remaining channels here.
    #[rtsan_standalone::nonblocking]
    #[inline]
    fn fill_from_internal<Sample: dasp::sample::Sample + dasp::sample::FromSample<f32>>(
        &mut self,
        data: &mut [Sample],
    ) {
        // convert the internal buffer and move it to the out_buffer
        if self.config.channel_count.get() == 1 {
            // mono: collapse each stereo frame to a single sample
            data.iter_mut()
                .zip(self.buffer.iter())
                .for_each(|(out, buf)| *out = buf.sum_to_mono().to_sample_());
        } else {
            // 2+ channels: write the stereo pair into the first two slots of
            // each interleaved output frame
            data.chunks_exact_mut(usize::from(self.config.channel_count.get()))
                .map(|frame| frame.split_first_chunk_mut::<2>().unwrap().0)
                .zip(self.buffer.iter())
                .for_each(|(out, buf)| *out = buf.to_sample());
        }
    }

    // unsure whether i want to use this or untyped_callback
    // also relevant when cpal gets made into a generic that maybe this gets useful
    /// Consumes `self` and returns the audio callback for the stream backend:
    /// mixes into the internal buffer, converts to the backend's sample
    /// format, and publishes playback state.
    pub fn get_typed_callback<Sample: dasp::sample::Sample + dasp::sample::FromSample<f32>>(
        mut self,
    ) -> impl FnMut(&mut [Sample], S) {
        move |audio_data, stream_data| {
            let channel_count = usize::from(self.config.channel_count.get());
            // output must be whole interleaved frames
            assert!(audio_data.len().is_multiple_of(channel_count));
            let out_frames = audio_data.len() / channel_count;
            // the 2x-sized internal buffer must cover this request
            // NOTE(review): strict `>` rejects the exact-fit case; `>=` would
            // also be safe — confirm whether the one-frame slack is intentional
            assert!(self.buffer.len() > out_frames);
            // assert_eq!(
            //     data.len(),
            //     usize::try_from(self.config.buffer_size).unwrap()
            //         * usize::from(self.config.channel_count.get())
            // );
            if self.fill_internal_buffer(out_frames) {
                self.fill_from_internal(audio_data);
            }
            // NOTE(review): when no work was done, `audio_data` is left
            // untouched — this relies on the backend handing over a zeroed
            // buffer; confirm, or write silence explicitly
            self.send_state(stream_data);
        }
        // move |data, info| {
        //     assert_eq!(
        //         data.len(),
        //         usize::try_from(self.config.buffer_size).unwrap()
        //             * usize::from(self.config.channel_count.get())
        //     );
        //     self.send_state(Some(info));
        // }
    }

    // pub fn get_callback(mut self) -> impl FnMut(&mut [Frame], S::BufferInformation) {
    //     move |data, info| {
    //         assert_eq!(data.len(), self.config.buffer_size as usize * self.config.channel_count.get() as usize)
    //         if self.fill_internal_buffer() {
    //             self.fill_from_internal(data);
    //         }
    //     }
    // }
}
/// Fills `output` with a 440 Hz sine test tone at the given `sample_rate`
/// (in Hz), writing the same value to both channels of each frame.
///
/// Phase restarts on every call (no state is carried between calls); the
/// first frame corresponds to sample clock 1, not 0.
// only used for testing
// if not testing is unused
#[allow(dead_code)]
fn sine(output: &mut [[f32; 2]], sample_rate: f32) {
    let mut sample_clock = 0f32;
    for frame in output {
        // advance the per-sample clock, wrapping once per second of audio
        sample_clock = (sample_clock + 1.) % sample_rate;
        let value = (sample_clock * 440. * 2. * std::f32::consts::PI / sample_rate).sin();
        // direct assignment instead of the explicit `IndexMut::index_mut`
        // calls — same effect, idiomatic slice syntax
        *frame = [value; 2];
    }
}