// Per-round dispatcher for the k-d tree sampling partition reduction.
// Called once per reduction round; the action depends on where `srp.round()`
// falls in the schedule described by `partners`:
//   - the round AFTER the last (`round == partners.rounds()`) only rebuilds
//     block links via update_links();
//   - a swap round with a negative sub-round dequeues the points exchanged in
//     the swap and splits them to neighbors;
//   - any other swap round receives the chosen sample and enqueues the
//     point exchange;
//   - sub-round 0 (re)computes local samples for the current dimension (and,
//     except in round 0, first updates links for the previous dimension);
//   - earlier sample sub-rounds merge received samples and push them up the
//     reduction tree; the final sub-round picks the median sample and hands
//     it to forward_samples().
// NOTE(review): the exact semantics of swap_round()/sub_round() live in
// KDTreePartners, which is not visible here — confirm against its definition.
void diy::detail::KDTreeSamplingPartition<Block,Point>::
operator()(Block* b, const diy::ReduceProxy& srp, const KDTreePartners& partners) const
{
    // Dimension being split this round; past the final round, reuse the
    // last round's dimension.
    int dim;
    if (srp.round() < partners.rounds())
        dim = partners.dim(srp.round());
    else
        dim = partners.dim(srp.round() - 1);

    if (srp.round() == partners.rounds())
        // Post-reduction round: only fix up the links.
        update_links(b, srp, dim, partners.sub_round(srp.round() - 2), partners.swap_rounds(), partners.wrap, partners.domain); // -1 would be the "uninformative" link round
    else if (partners.swap_round(srp.round()) && partners.sub_round(srp.round()) < 0)       // link round
    {
        dequeue_exchange(b, srp, dim);         // from the swap round
        split_to_neighbors(b, srp, dim);
    }
    else if (partners.swap_round(srp.round()))
    {
        // Swap round proper: receive the split sample, then exchange points.
        Samples samples;
        receive_samples(b, srp, samples);
        enqueue_exchange(b, srp, dim, samples);
    }
    else if (partners.sub_round(srp.round()) == 0)
    {
        if (srp.round() > 0)
        {
            // Wrap around to the last dimension when dim == 0.
            int prev_dim = dim - 1;
            if (prev_dim < 0)
                prev_dim += dim_;
            update_links(b, srp, prev_dim, partners.sub_round(srp.round() - 2), partners.swap_rounds(), partners.wrap, partners.domain);    // -1 would be the "uninformative" link round
        }
        compute_local_samples(b, srp, dim);
    }
    else if (partners.sub_round(srp.round()) < (int) partners.histogram.rounds()/2)   // we are reusing partners class, so really we are talking about the samples rounds here
    {
        // Interior sample round: merge incoming samples and forward them up
        // the reduction tree.
        Samples samples;
        add_samples(b, srp, samples);
        srp.enqueue(srp.out_link().target(0), samples);
    }
    else
    {
        // Top of the sample reduction: collapse all samples to one.
        Samples samples;
        add_samples(b, srp, samples);
        if (samples.size() != 1)
        {
            // pick the median
            std::nth_element(samples.begin(), samples.begin() + samples.size()/2, samples.end());
            std::swap(samples[0], samples[samples.size()/2]);
            //std::sort(samples.begin(), samples.end());
            //samples[0] = (samples[samples.size()/2] + samples[samples.size()/2 + 1])/2;
            samples.resize(1);
        }
        forward_samples(b, srp, samples);
    }
}
int main() { e_epiphany_t group0; e_mem_t shm1; host_chan_t chan2; e_mem_t shm3; pthread_t t5; bool r12; e_init(0); e_reset_system(); e_open(&group0, 0, 0, 4, 4); e_reset_group(&group0); setup_queues(); e_alloc(&shm1, sa2, 2048); init_host_chan(&chan2, &group0, 0, 0, &shm1, la0, la1); init_core_chan(&group0, 0, 1, la3, la4); e_load("core0.srec", &group0, 0, 0, 1); init_core_chan(&group0, 0, 2, la6, la7); e_load("core1.srec", &group0, 0, 1, 1); init_core_chan(&group0, 0, 3, la9, la10); e_load("core2.srec", &group0, 0, 2, 1); init_core_chan(&group0, 1, 3, la12, la13); e_load("core3.srec", &group0, 0, 3, 1); init_core_chan(&group0, 1, 2, la15, la16); e_load("core7.srec", &group0, 1, 3, 1); init_core_chan(&group0, 1, 1, la18, la19); e_load("core6.srec", &group0, 1, 2, 1); init_core_chan(&group0, 1, 0, la21, la22); e_load("core5.srec", &group0, 1, 1, 1); init_core_chan(&group0, 2, 0, la24, la25); e_load("core4.srec", &group0, 1, 0, 1); init_core_chan(&group0, 2, 1, la27, la28); e_load("core8.srec", &group0, 2, 0, 1); init_core_chan(&group0, 2, 2, la30, la31); e_load("core9.srec", &group0, 2, 1, 1); init_core_chan(&group0, 2, 3, la33, la34); e_load("core10.srec", &group0, 2, 2, 1); init_core_chan(&group0, 3, 3, la36, la37); e_load("core11.srec", &group0, 2, 3, 1); init_core_chan(&group0, 3, 2, la39, la40); e_load("core15.srec", &group0, 3, 3, 1); init_core_chan(&group0, 3, 1, la42, la43); e_load("core14.srec", &group0, 3, 2, 1); init_core_chan(&group0, 3, 0, la45, la46); e_load("core13.srec", &group0, 3, 1, 1); e_alloc(&shm3, sa50, 2048); init_host_chan(&chan4, &group0, 3, 0, &shm3, la48, la49); e_load("core12.srec", &group0, 3, 0, 1); pthread_create(&t5, NULL, thread_t5, NULL); r12 = true; while (1) { bool v13; float _a14[512]; float *a14 = _a14; bool v15; v13 = r12; if (!v13) break; v15 = receive_samples(a14); if (v15) { uint32_t r16; float _a17[512]; float *a17 = _a17; uint32_t v18; bool v19; r16 = 512; r16 = 512; for (v18 = 0; v18 < 512; v18++) { a17[v18] = a14[v18]; 
} v19 = host_write_h2c(chan2, a17, 0, r16); r12 = v19; } else { r12 = false; } } host_close_chan(chan2); pthread_join(t5, NULL); teardown_queues(); e_free(&shm1); e_free(&shm3); e_reset_group(&group0); e_close(&group0); e_finalize(); return 0; }
/* Returns 1 if the subframe is synchronized in time, 0 otherwise */
/*
 * Processes one subframe's worth of samples, either read from a file
 * (q->file_mode) or pulled from the radio via receive_samples(), and runs the
 * FIND/TRACK synchronization state machine on them.
 *
 * Returns:  1  when the subframe is time-synchronized,
 *           0  (SRSLTE_SUCCESS) when not yet synchronized,
 *           SRSLTE_ERROR on read/sync failures,
 *           SRSLTE_ERROR_INVALID_INPUTS on NULL arguments,
 *           7 on end of file (NOTE(review): meaning of this magic value is
 *             not visible here — confirm against callers).
 */
int srslte_ue_sync_zerocopy(srslte_ue_sync_t *q, cf_t *input_buffer) {
  int ret = SRSLTE_ERROR_INVALID_INPUTS;
  uint32_t track_idx;

  if (q != NULL && input_buffer != NULL) {
    if (q->file_mode) {
      /* File mode: read one subframe (sf_len samples) from the file source. */
      int n = srslte_filesource_read(&q->file_source, input_buffer, q->sf_len);
      if (n < 0) {
        fprintf(stderr, "Error reading input file\n");
        return SRSLTE_ERROR;
      }
      if (n == 0) {
        /* End of file. The commented-out code below would instead rewind the
         * file and keep looping. */
        return 7;
        // srslte_filesource_seek(&q->file_source, 0);
        // q->sf_idx = 9;
        // int n = srslte_filesource_read(&q->file_source, input_buffer, q->sf_len);
        // if (n < 0) {
        //   fprintf(stderr, "Error reading input file\n");
        //   return SRSLTE_ERROR;
        // }
      }
      if (q->correct_cfo) {
        /* Apply the configured CFO in-place; file_cfo is divided by 15000
         * (presumably the 15 kHz LTE subcarrier spacing — confirm) and by
         * the FFT size to get a normalized frequency. */
        srslte_cfo_correct(&q->file_cfo_correct, input_buffer, input_buffer, q->file_cfo / 15000 / q->fft_size);
      }
      /* Advance the subframe counter modulo the 10 subframes per frame. */
      q->sf_idx++;
      if (q->sf_idx == 10) {
        q->sf_idx = 0;
      }
      INFO("Reading %d samples. sf_idx = %d\n", q->sf_len, q->sf_idx);
      ret = 1;
    } else {
      /* Radio mode: fetch the next batch of samples from the receiver. */
      if (receive_samples(q, input_buffer)) {
        fprintf(stderr, "Error receiving samples\n");
        return SRSLTE_ERROR;
      }

      switch (q->state) {
        case SF_FIND:
          /* Acquisition: search from offset 0 for a correlation peak. */
          switch(srslte_sync_find(&q->sfind, input_buffer, 0, &q->peak_idx)) {
            case SRSLTE_SYNC_ERROR:
              ret = SRSLTE_ERROR;
              fprintf(stderr, "Error finding correlation peak (%d)\n", ret);
              return SRSLTE_ERROR;
            case SRSLTE_SYNC_FOUND:
              ret = find_peak_ok(q, input_buffer);
              break;
            case SRSLTE_SYNC_FOUND_NOSPACE:
              /* If a peak was found but there is not enough space for SSS/CP detection, discard a few samples */
              printf("No space for SSS/CP detection. Realigning frame...\n");
              q->recv_callback(q->stream, dummy_offset_buffer, q->frame_len/2, NULL);
              srslte_sync_reset(&q->sfind);
              ret = SRSLTE_SUCCESS;
              break;
            default:
              /* No peak found this subframe; keep searching. */
              ret = SRSLTE_SUCCESS;
              break;
          }
          if (q->do_agc) {
            srslte_agc_process(&q->agc, input_buffer, q->sf_len);
          }
          break;
        case SF_TRACK:
          /* Tracking: assume synchronization holds, refine it periodically. */
          ret = 1;
          srslte_sync_sss_en(&q->strack, q->decode_sss_on_track);
          q->sf_idx = (q->sf_idx + q->nof_recv_sf) % 10;

          /* Every SF idx 0 and 5, find peak around known position q->peak_idx */
          if (q->sf_idx == 0 || q->sf_idx == 5) {
            /* Run AGC only every agc_period frames (or every frame if the
             * period is 0). */
            if (q->do_agc && (q->agc_period == 0 || (q->agc_period && (q->frame_total_cnt%q->agc_period) == 0))) {
              srslte_agc_process(&q->agc, input_buffer, q->sf_len);
            }

#ifdef MEASURE_EXEC_TIME
            struct timeval t[3];
            gettimeofday(&t[1], NULL);
#endif

            track_idx = 0;

            /* Track PSS/SSS around the expected PSS position
             * In tracking phase, the subframe carrying the PSS is always the last one of the frame */
            switch(srslte_sync_find(&q->strack, input_buffer, q->frame_len - q->sf_len/2 - q->fft_size - q->strack.max_offset/2, &track_idx)) {
              case SRSLTE_SYNC_ERROR:
                ret = SRSLTE_ERROR;
                fprintf(stderr, "Error tracking correlation peak\n");
                return SRSLTE_ERROR;
              case SRSLTE_SYNC_FOUND:
                ret = track_peak_ok(q, track_idx);
                break;
              case SRSLTE_SYNC_FOUND_NOSPACE:
                // It's very very unlikely that we fall here because this event should happen at FIND phase only
                ret = 0;
                q->state = SF_FIND;
                printf("Warning: No space for SSS/CP while in tracking phase\n");
                break;
              case SRSLTE_SYNC_NOFOUND:
                ret = track_peak_no(q);
                break;
            }

#ifdef MEASURE_EXEC_TIME
            /* Cumulative moving average of the tracking execution time. */
            gettimeofday(&t[2], NULL);
            get_time_interval(t);
            q->mean_exec_time = (float) SRSLTE_VEC_CMA((float) t[0].tv_usec, q->mean_exec_time, q->frame_total_cnt);
#endif

            if (ret == SRSLTE_ERROR) {
              /* Tracking failed: fall back to acquisition. */
              fprintf(stderr, "Error processing tracking peak\n");
              q->state = SF_FIND;
              return SRSLTE_SUCCESS;
            }

            q->frame_total_cnt++;
          } else {
            /* Off-peak subframes: just apply the current CFO estimate from
             * the tracking object, in-place. */
            if (q->correct_cfo) {
              srslte_cfo_correct(&q->sfind.cfocorr, input_buffer, input_buffer, -srslte_sync_get_cfo(&q->strack) / q->fft_size);
            }
          }
          break;
      }
    }
  }
  return ret;
}