mirror of https://gitlab.com/then-try-this/samplebrain.git
synced 2025-05-12 10:37:20 +00:00
timestretch and synaptic slide works

This commit is contained in:
parent 9599f8c142
commit 4e22d13f31
@@ -76,7 +76,7 @@ public:
   void build_synapses_thresh(search_params &params, double threshold);
   void build_synapses_fixed(search_params &params);
   u32 search_synapses(const block &target, search_params &params);
-  double get_current_error() { return m_current_error/m_average_error; }
+  double get_current_error() { return m_current_error; }

   static bool unit_test();
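The value returned by get_current_error() is what the synaptic-slide code in renderer::find_render_blocks (further down this commit) compares against the threshold set with set_slide_error(). A minimal sketch of that gate, using only names that appear in this diff; the free function itself is hypothetical, not part of the commit:

    // Sketch only: the target advances when the brain's last match error drops
    // below the slide threshold; while it stays above, the same target block is
    // matched again on the next hop, which stretches it in time.
    bool should_advance_target(double current_error, double slide_error) {
        return current_error < slide_error;
    }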
@@ -98,7 +98,7 @@ int main(int argc, char *argv[])
 */

   brain source;
-  ifstream ifs("8bit.brain",ios::binary);
+  ifstream ifs("shosta.brain",ios::binary);
   ifs||source;
   ifs.close();
@@ -119,11 +119,12 @@ int main(int argc, char *argv[])
   renderer rr(source,target);
   rr.set_playing(true);
   rr.get_params()->m_ratio=0;
-  rr.get_params()->m_usage_importance=0.5;
-  rr.get_params()->m_num_synapses=150;
-  rr.set_slide_error(5.5);
-  rr.set_search_algo(renderer::SYNAPTIC);
+  rr.get_params()->m_usage_importance=0.6;
+  source.set_usage_falloff(0.9);
+  rr.get_params()->m_num_synapses=40;
+  rr.set_slide_error(3400.5);
+  rr.set_search_algo(renderer::SYNAPTIC_SLIDE);
+  rr.set_target_mix(0.2);

   a->start_recording("debug");
   a->m_client.set_callback(run_audio, &rr);
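The hunk above is the renderer configuration for the SYNAPTIC_SLIDE mode. A hypothetical helper that bundles the same calls; the setters and parameter fields are the ones used above, but the function itself, its grouping, and the numeric values are illustrative only:

    // Sketch: configure a renderer for synaptic-slide playback
    // (renderer and brain come from the project's headers).
    void setup_slide(renderer &rr, brain &source) {
        rr.get_params()->m_ratio=0;
        rr.get_params()->m_usage_importance=0.6;
        rr.get_params()->m_num_synapses=40;
        source.set_usage_falloff(0.9);
        rr.set_slide_error(3400.5);              // compared against get_current_error()
        rr.set_search_algo(renderer::SYNAPTIC_SLIDE);
        rr.set_target_mix(0.2);                  // blend of target pcm into the output (see render())
        rr.set_playing(true);
    }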
@@ -33,6 +33,7 @@ void renderer::init(brain &source, brain &target) {
   m_search_algo=BASIC;
   m_slide_error=1;
   m_target_index=0;
   m_target_counter=0;
+  m_last_tgt_shift=0;
 }
@@ -42,12 +43,164 @@ void renderer::reset() {
  m_target_time=0;
  m_render_time=0;
  m_target_index=0;
  m_target_counter=0;
  m_render_blocks.clear();
  m_source.jiggle();
}

void renderer::process(u32 nframes, float *buf) {
  if (!m_playing) return;
  if (!find_render_blocks(nframes)) return;

  render(nframes,buf);

  clean_up();

  m_render_time+=nframes;
  m_target_time+=nframes;
}

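process() is now split into a search phase (find_render_blocks), a mixing phase (render) and a cleanup phase (clean_up). Because render() accumulates into the buffer with +=, the buffer handed to process() presumably needs to be cleared each cycle. A hypothetical caller sketch, not taken from the commit (renderer and u32 come from the project's headers):

    // Sketch: one audio callback cycle driving the renderer.
    #include <vector>
    void audio_cycle(renderer &rr, u32 nframes) {
        std::vector<float> buf(nframes, 0.0f); // cleared buffer; render() mixes into it
        rr.process(nframes, buf.data());
        // ...copy buf to the audio backend here...
    }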
bool renderer::find_render_blocks(u32 nframes) {
  // get new blocks from source for the current buffer

  // where are we phase?
  u32 tgt_shift = m_target.get_block_size()-m_target.get_overlap();
  u32 tgt_end = (m_target_time+nframes)/(float)tgt_shift;

  // stuff has changed - recompute and abort
  if (tgt_shift!=m_last_tgt_shift ||
      tgt_end>=m_target.get_num_blocks() || m_source.get_num_blocks()==0) {
    reset();
    m_last_tgt_shift = tgt_shift;
    // next time...
    return false;
  }

  // cerr<<"-----------------"<<endl;
  // cerr<<"tgt start:"<<m_target_index<<endl;
  // cerr<<"tgt end:"<<tgt_end<<endl;

  // search phase
  // get indices for current buffer
  u32 counter = m_target_counter;
  //u32 cur_time = m_render_time;
  while (counter<=tgt_end) {
    u32 time=m_target_counter*tgt_shift;
    u32 src_index=0;

    switch (m_search_algo) {
    case BASIC:
      src_index = m_source.search(m_target.get_block(m_target_index), m_search_params);
      break;
    case REV_BASIC:
      src_index = m_source.rev_search(m_target.get_block(m_target_index), m_search_params);
      break;
    case SYNAPTIC:
    case SYNAPTIC_SLIDE:
      src_index = m_source.search_synapses(m_target.get_block(m_target_index), m_search_params);
      break;
    }

    if (m_search_algo==SYNAPTIC_SLIDE) {
      m_render_blocks.push_back(render_block(src_index,m_target_index,time));

      if (m_source.get_current_error()<m_slide_error &&
          m_target_counter%1==0) {
        m_target_index++;
      }
      m_target_counter++;

    } else {
      // put them in the index list
      m_render_blocks.push_back(render_block(src_index,m_target_index,time));

      if (m_target_counter%1==0) {
        m_target_index++;
      }
      m_target_counter++;
    }
    counter++;
  }
  return true;
}

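The target is walked in hops of tgt_shift samples (block size minus overlap), and tgt_end is the last target block index that falls inside the current buffer. A small worked sketch of that arithmetic; the block size, overlap, playback position and buffer length are made-up values, not taken from the commit:

    // Sketch of the block timing used by find_render_blocks().
    #include <cstdint>
    #include <cstdio>
    typedef uint32_t u32;

    int main() {
        u32 block_size = 3000, overlap = 1500;    // assumed values
        u32 tgt_shift = block_size - overlap;     // 1500 samples per hop
        u32 target_time = 44100, nframes = 256;   // assumed position and buffer size
        u32 tgt_end = (u32)((target_time + nframes) / (float)tgt_shift); // -> 29
        printf("shift %u, last block index %u, starting at sample %u\n",
               tgt_shift, tgt_end, tgt_end * tgt_shift);
        return 0;
    }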
void renderer::render(u32 nframes, float *buf) {
  // render phase
  // render all blocks in list
  for (std::list<render_block>::iterator i=m_render_blocks.begin(); i!=m_render_blocks.end(); ++i) {
    const sample &pcm=m_source.get_block(i->m_index).get_pcm();
    const sample &n_pcm=m_source.get_block(i->m_index).get_n_pcm();
    const sample &target_pcm=m_target.get_block(i->m_tgt_index).get_pcm();
    // get the sample offset into the buffer
    s32 offset = i->m_time-m_render_time;

    // assume midway through block
    u32 block_start = offset;
    u32 buffer_start = 0;
    if (offset<0) {
      block_start=-offset;
      if (block_start>=pcm.get_length()) i->m_finished=true;
    } else { // block is midway through buffer
      block_start=0;
      buffer_start=offset;
    }

    // cerr<<"-----------------"<<endl;
    // cerr<<"block start:"<<block_start<<endl;
    // cerr<<"buffer start:"<<buffer_start<<endl;

    if (!i->m_finished) {
      // mix in
      u32 buffer_pos = buffer_start;
      u32 block_pos = block_start;
      u32 block_end = pcm.get_length();

      while (block_pos<block_end && buffer_pos<nframes) {
        // mix with normalised version
        float brain_sample = (pcm[block_pos]*(1-m_n_mix)+
                              n_pcm[block_pos]*m_n_mix);

        // for mixing with target audio
        float target_sample = target_pcm[block_pos];

        buf[buffer_pos]+=(brain_sample*(1-m_target_mix) +
                          target_sample*m_target_mix)*0.2*m_volume;
        ++buffer_pos;
        ++block_pos;
      }
    }
  }
}

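Each output sample is a two-stage linear blend: first between the raw brain block and its normalised version (weighted by m_n_mix), then between that result and the corresponding target audio (weighted by m_target_mix), scaled by 0.2 and the volume. The same arithmetic as a standalone sketch; the function name is hypothetical:

    // Sketch: the per-sample mix performed inside render().
    float mix_sample(float brain, float n_brain, float target,
                     float n_mix, float target_mix, float volume) {
        float brain_sample = brain * (1.0f - n_mix) + n_brain * n_mix;
        return (brain_sample * (1.0f - target_mix) + target * target_mix)
               * 0.2f * volume;
    }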
void renderer::clean_up() {
  // cleanup phase
  // delete old ones
  std::list<render_block>::iterator i=m_render_blocks.begin();
  std::list<render_block>::iterator ni=m_render_blocks.begin();
  while(i!=m_render_blocks.end()) {
    ni++;
    if (i->m_finished) m_render_blocks.erase(i);
    i=ni;
  }
}

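clean_up() keeps a second iterator so that erasing a finished block does not lose the position it advances from, which is safe on std::list. An equivalent, more compact formulation of the same loop using the iterator returned by erase(); this is an alternative sketch assuming the same members, not what the commit does:

    // Sketch: erase finished blocks using erase()'s return value.
    for (std::list<render_block>::iterator i = m_render_blocks.begin();
         i != m_render_blocks.end(); ) {
        if (i->m_finished) i = m_render_blocks.erase(i);
        else ++i;
    }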
void renderer::old_process(u32 nframes, float *buf) {
  if (!m_playing) return;

  // get new blocks from source for the current buffer
  u32 tgt_shift = m_target.get_block_size()-m_target.get_overlap();

@@ -160,6 +313,7 @@ void renderer::process(u32 nframes, float *buf) {
  m_target_time+=nframes;
}

bool renderer::unit_test() {
  brain source;
  source.load_sound("test_data/up.wav");
@@ -43,6 +43,7 @@ renderer(brain &source, brain &target) :
   void reset();

   void process(u32 nframes, float *buf);
+  void old_process(u32 nframes, float *buf);

   void set_search_algo(search_algo s) { m_search_algo=s; }
   void set_playing(bool s) { m_playing=s; }
@@ -58,6 +59,11 @@ renderer(brain &source, brain &target) :

 private:

+  bool find_render_blocks(u32 nframes);
+  void render(u32 nframes, float *buf);
+  void clean_up();

   // realtime stuff
   class render_block {
   public:
@@ -76,8 +82,10 @@ private:
  float m_volume;
  bool m_playing;
  u32 m_target_index;
  u32 m_target_counter;
  u32 m_target_time;
  u32 m_render_time;
  u32 m_stretch;
  float m_n_mix;
  float m_target_mix;