/* The main CPU accumulator thread */
void guppi_accum_thread(void *_args) {

    float **accumulator;    //indexed accumulator[accum_id][chan][subband][stokes]
    char accum_dirty[NUM_SW_STATES];
    struct sdfits_data_columns data_cols[NUM_SW_STATES];
    int payload_type;
    int i, j, k, rv;

    /* Get arguments */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;

    /* Set cpu affinity: start from the current mask so cpuset is
     * initialised before it is modified */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    cpuset = cpuset_orig;
    CPU_CLR(13, &cpuset);
    CPU_SET(9, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) {
        guppi_error("guppi_accum_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        guppi_error("guppi_accum_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_accum_thread",
                    "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);
    pthread_cleanup_push((void *)guppi_thread_set_finished, args);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Read in general parameters */
    struct guppi_params gp;
    struct sdfits sf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &sf);

    /* Attach to databuf shared mem */
    struct guppi_databuf *db_in, *db_out;
    db_in = guppi_databuf_attach(args->input_buffer);
    char errmsg[256];
    if (db_in==NULL) {
        sprintf(errmsg, "Error attaching to input databuf(%d) shared memory.",
                args->input_buffer);
        guppi_error("guppi_accum_thread", errmsg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_in);

    db_out = guppi_databuf_attach(args->output_buffer);
    if (db_out==NULL) {
        sprintf(errmsg, "Error attaching to output databuf(%d) shared memory.",
                args->output_buffer);
        guppi_error("guppi_accum_thread", errmsg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_out);

    /* Determine high/low bandwidth mode */
    char bw_mode[16];
    if (hgets(st.buf, "BW_MODE", 16, bw_mode)) {
        if (strncmp(bw_mode, "high", 4) == 0)
            payload_type = INT_PAYLOAD;
        else if (strncmp(bw_mode, "low", 3) == 0)
            payload_type = FLOAT_PAYLOAD;
        else
            guppi_error("guppi_accum_thread", "Unsupported bandwidth mode");
    } else
        guppi_error("guppi_accum_thread", "BW_MODE not set");

    /* Read nchan and nsubband from status shared memory */
    guppi_read_obs_params(st.buf, &gp, &sf);

    /* Allocate memory for vector accumulators */
    create_accumulators(&accumulator, sf.hdr.nchan, sf.hdr.nsubband);
    pthread_cleanup_push((void *)destroy_accumulators, accumulator);

    /* Clear the vector accumulators */
    for (i = 0; i < NUM_SW_STATES; i++) accum_dirty[i] = 1;
    reset_accumulators(accumulator, data_cols, accum_dirty,
                       sf.hdr.nsubband, sf.hdr.nchan);

    /* Loop */
    int curblock_in=0, curblock_out=0;
    int first=1;
    float reqd_exposure=0;
    double accum_time=0;
    int integ_num=0;
    float pfb_rate;
    int heap, accumid, struct_offset, array_offset;
    char *hdr_in=NULL, *hdr_out=NULL;
    struct databuf_index *index_in, *index_out;
    int nblock_int=0, npacket=0, n_pkt_drop=0, n_heap_drop=0;

    signal(SIGINT, cc);

    while (run) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db_in, curblock_in);
        if (rv!=0) continue;

        /* Note accumulating status and current block */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "accumulating");
        hputi4(st.buf, "ACCBLKIN", curblock_in);
        guppi_status_unlock_safe(&st);

        /* Read param struct for this block */
        hdr_in = guppi_databuf_header(db_in, curblock_in);
        if (first)
            guppi_read_obs_params(hdr_in, &gp, &sf);
        else
            guppi_read_subint_params(hdr_in, &gp, &sf);

        /* Do any first-time stuff: first time the code runs, not first
         * time this block is processed */
        if (first) {

            /* Set up the first output header.  This header is copied from
             * block to block each time a new block is created */
            hdr_out = guppi_databuf_header(db_out, curblock_out);
            memcpy(hdr_out, guppi_databuf_header(db_in, curblock_in),
                   GUPPI_STATUS_SIZE);

            /* Read required exposure and PFB rate from status shared memory */
            reqd_exposure = sf.data_columns.exposure;
            pfb_rate = sf.hdr.efsampfr / (2 * sf.hdr.nchan);

            /* Initialise the index in the output block */
            index_out = (struct databuf_index*)
                        guppi_databuf_index(db_out, curblock_out);
            index_out->num_datasets = 0;
            index_out->array_size = sf.hdr.nsubband * sf.hdr.nchan *
                                    NUM_STOKES * 4;   /* 4 bytes per float */

            first=0;
        }

        /* Loop through each spectrum (heap) in input buffer */
        index_in = (struct databuf_index*)guppi_databuf_index(db_in, curblock_in);

        for (heap = 0; heap < index_in->num_heaps; heap++) {

            /* If invalid, record it and move to next heap */
            if (!index_in->cpu_gpu_buf[heap].heap_valid) {
                n_heap_drop++;
                continue;
            }

            /* Read in heap from buffer */
            char* heap_addr = (char*)(guppi_databuf_data(db_in, curblock_in) +
                                sizeof(struct freq_spead_heap) * heap);
            struct freq_spead_heap* freq_heap = (struct freq_spead_heap*)(heap_addr);

            char* payload_addr = (char*)(guppi_databuf_data(db_in, curblock_in) +
                                sizeof(struct freq_spead_heap) * MAX_HEAPS_PER_BLK +
                                (index_in->heap_size - sizeof(struct freq_spead_heap)) * heap );
            int *i_payload = (int*)(payload_addr);
            float *f_payload = (float*)(payload_addr);

            accumid = freq_heap->status_bits & 0x7;

            /* Debug: print heap */
            /* printf("%d, %d, %d, %d, %d, %d\n", freq_heap->time_cntr,
                freq_heap->spectrum_cntr, freq_heap->integ_size, freq_heap->mode,
                freq_heap->status_bits, freq_heap->payload_data_off); */

            /* If we have accumulated for long enough, write vectors to output block */
            if (accum_time >= reqd_exposure) {

                for (i = 0; i < NUM_SW_STATES; i++) {

                    /* If a particular accumulator is dirty, write it to the output buffer */
                    if (accum_dirty[i]) {

                        /* If there is insufficient space, first mark the block
                         * as filled and request a new block */
                        index_out = (struct databuf_index*)
                                    (guppi_databuf_index(db_out, curblock_out));

                        if ( (index_out->num_datasets+1) *
                             (index_out->array_size + sizeof(struct sdfits_data_columns)) >
                             db_out->block_size) {

                            printf("Accumulator finished with output block %d\n",
                                   curblock_out);

                            /* Write block number to status buffer */
                            guppi_status_lock_safe(&st);
                            hputi4(st.buf, "ACCBLKOU", curblock_out);
                            guppi_status_unlock_safe(&st);

                            /* Update packet count and loss fields in output header */
                            hputi4(hdr_out, "NBLOCK", nblock_int);
                            hputi4(hdr_out, "NPKT", npacket);
                            hputi4(hdr_out, "NPKTDROP", n_pkt_drop);
                            hputi4(hdr_out, "NHPDROP", n_heap_drop);

                            /* Close out current integration */
                            guppi_databuf_set_filled(db_out, curblock_out);

                            /* Wait for next output buf */
                            curblock_out = (curblock_out + 1) % db_out->n_block;

                            while ((rv=guppi_databuf_wait_free(db_out, curblock_out))
                                    != GUPPI_OK) {
                                if (rv==GUPPI_TIMEOUT) {
                                    guppi_warn("guppi_accum_thread",
                                               "timeout while waiting for output block");
                                    continue;
                                } else {
                                    guppi_error("guppi_accum_thread",
                                                "error waiting for free databuf");
                                    run=0;
                                    pthread_exit(NULL);
                                    break;
                                }
                            }

                            hdr_out = guppi_databuf_header(db_out, curblock_out);
                            memcpy(hdr_out, guppi_databuf_header(db_in, curblock_in),
                                   GUPPI_STATUS_SIZE);

                            /* Initialise the index in the new output block */
                            index_out = (struct databuf_index*)
                                        guppi_databuf_index(db_out, curblock_out);
                            index_out->num_datasets = 0;
                            index_out->array_size = sf.hdr.nsubband * sf.hdr.nchan *
                                                    NUM_STOKES * 4;

                            nblock_int=0;
                            npacket=0;
                            n_pkt_drop=0;
                            n_heap_drop=0;
                        }

                        /* Update index for output buffer */
                        index_out = (struct databuf_index*)
                                    (guppi_databuf_index(db_out, curblock_out));

                        if (index_out->num_datasets == 0)
                            struct_offset = 0;
                        else
                            struct_offset = index_out->disk_buf[index_out->num_datasets-1].array_offset +
                                            index_out->array_size;

                        array_offset = struct_offset + sizeof(struct sdfits_data_columns);
                        index_out->disk_buf[index_out->num_datasets].struct_offset = struct_offset;
                        index_out->disk_buf[index_out->num_datasets].array_offset = array_offset;

                        /* Copy sdfits_data_columns struct to disk buffer */
                        memcpy(guppi_databuf_data(db_out, curblock_out) + struct_offset,
                               &data_cols[i], sizeof(struct sdfits_data_columns));

                        /* Copy data array to disk buffer */
                        memcpy(guppi_databuf_data(db_out, curblock_out) + array_offset,
                               accumulator[i], index_out->array_size);

                        /* Update SDFITS data_columns pointer to data array */
                        ((struct sdfits_data_columns*)
                         (guppi_databuf_data(db_out, curblock_out) + struct_offset))->data =
                            (unsigned char*)(guppi_databuf_data(db_out, curblock_out) + array_offset);

                        index_out->num_datasets = index_out->num_datasets + 1;
                    }
                }

                accum_time = 0;
                integ_num += 1;

                reset_accumulators(accumulator, data_cols, accum_dirty,
                                   sf.hdr.nsubband, sf.hdr.nchan);
            }

            /* Only add spectrum to accumulator if blanking bit is low */
            if ((freq_heap->status_bits & 0x08) == 0) {

                /* Fill in data columns header fields */
                if (!accum_dirty[accumid]) {

                    /* Record SPEAD header fields */
                    data_cols[accumid].time = index_in->cpu_gpu_buf[heap].heap_rcvd_mjd;
                    data_cols[accumid].time_counter = freq_heap->time_cntr;
                    data_cols[accumid].integ_num = integ_num;
                    data_cols[accumid].sttspec = freq_heap->spectrum_cntr;
                    data_cols[accumid].accumid = accumid;

                    /* Fill in rest of fields from status buffer */
                    strcpy(data_cols[accumid].object, sf.data_columns.object);
                    data_cols[accumid].azimuth = sf.data_columns.azimuth;
                    data_cols[accumid].elevation = sf.data_columns.elevation;
                    data_cols[accumid].bmaj = sf.data_columns.bmaj;
                    data_cols[accumid].bmin = sf.data_columns.bmin;
                    data_cols[accumid].bpa = sf.data_columns.bpa;
                    data_cols[accumid].centre_freq_idx = sf.data_columns.centre_freq_idx;
                    data_cols[accumid].ra = sf.data_columns.ra;
                    data_cols[accumid].dec = sf.data_columns.dec;
                    data_cols[accumid].exposure = 0.0;

                    for (i = 0; i < NUM_SW_STATES; i++)
                        data_cols[accumid].centre_freq[i] = sf.data_columns.centre_freq[i];

                    accum_dirty[accumid] = 1;
                }

                data_cols[accumid].exposure += (float)(freq_heap->integ_size)/pfb_rate;
                data_cols[accumid].stpspec = freq_heap->spectrum_cntr;

                /* Add spectrum to appropriate vector accumulator (high-bw mode) */
                if (payload_type == INT_PAYLOAD) {
                    for (i = 0; i < sf.hdr.nchan; i++) {
                        for (j = 0; j < sf.hdr.nsubband; j++) {
                            for (k = 0; k < NUM_STOKES; k++) {
                                accumulator[accumid]
                                    [i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k] +=
                                    (float)i_payload[i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k];
                            }
                        }
                    }
                }
                /* Add spectrum to appropriate vector accumulator (low-bw mode) */
                else {
                    for (i = 0; i < sf.hdr.nchan; i++) {
                        for (j = 0; j < sf.hdr.nsubband; j++) {
                            for (k = 0; k < NUM_STOKES; k++) {
                                accumulator[accumid]
                                    [i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k] +=
                                    f_payload[i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k];
                            }
                        }
                    }
                }
            }

            accum_time += (double)freq_heap->integ_size / pfb_rate;
        }

        /* Update packet count and loss fields from input header */
        nblock_int++;
        npacket += gp.num_pkts_rcvd;
        n_pkt_drop += gp.num_pkts_dropped;

        /* Done with current input block */
        guppi_databuf_set_free(db_in, curblock_in);
        curblock_in = (curblock_in + 1) % db_in->n_block;

        /* Check for cancel */
        pthread_testcancel();
    }

    pthread_exit(NULL);

    pthread_cleanup_pop(0); /* Closes destroy_accumulators */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach (out) */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach (in) */
    pthread_cleanup_pop(0); /* Closes guppi_free_sdfits */
    pthread_cleanup_pop(0); /* Closes guppi_thread_set_finished */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
}
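
/*
 * Illustrative sketch (not called anywhere): how guppi_accum_thread lays a
 * dataset out in an output block.  Each dataset is an sdfits_data_columns
 * struct immediately followed by its nchan * nsubband * NUM_STOKES float
 * array, so the n-th struct starts where the (n-1)-th array ends.  The
 * helper name and signature are hypothetical, added only to document the
 * offset arithmetic used above.
 */
static void accum_example_offsets(const struct databuf_index *idx,
                                  int *struct_offset, int *array_offset)
{
    if (idx->num_datasets == 0)
        *struct_offset = 0;
    else
        *struct_offset = idx->disk_buf[idx->num_datasets-1].array_offset +
                         idx->array_size;
    *array_offset = *struct_offset + (int)sizeof(struct sdfits_data_columns);
}
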
void guppi_psrfits_thread(void *_args) {

    /* Get args */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;
    pthread_cleanup_push((void *)guppi_thread_set_finished, args);

    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    CPU_ZERO(&cpuset);
    CPU_SET(1, &cpuset);
    int rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) {
        guppi_error("guppi_psrfits_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        guppi_error("guppi_psrfits_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_psrfits_thread",
                    "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Initialize some key parameters */
    struct guppi_params gp;
    struct psrfits pf;
    pf.sub.data = NULL;
    pf.sub.dat_freqs = pf.sub.dat_weights = NULL;
    pf.sub.dat_offsets = pf.sub.dat_scales = NULL;
    pf.hdr.chan_dm = 0.0;
    pf.filenum = 0;   // This is crucial
    pthread_cleanup_push((void *)guppi_free_psrfits, &pf);
    pthread_cleanup_push((void *)psrfits_close, &pf);
    //pf.multifile = 0;  // Use a single file for fold mode
    pf.multifile = 1;    // Use multiple files for fold mode
    pf.quiet = 0;        // Print a message for each subint written

    /* Attach to databuf shared mem */
    struct guppi_databuf *db;
    db = guppi_databuf_attach(args->input_buffer);
    if (db==NULL) {
        guppi_error("guppi_psrfits_thread",
                    "Error attaching to databuf shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db);

    /* Loop */
    int curblock=0, total_status=0, firsttime=1, run=1, got_packet_0=0;
    int mode=SEARCH_MODE;
    char *ptr;
    char tmpstr[256];
    struct foldbuf fb;
    struct polyco pc[64];
    memset(pc, 0, sizeof(pc));
    int n_polyco_written=0;
    float *fold_output_array = NULL;
    int scan_finished=0;
    signal(SIGINT, cc);
    do {
        /* Note waiting status */
        guppi_status_lock_safe(&st);
        if (got_packet_0)
            sprintf(tmpstr, "waiting(%d)", curblock);
        else
            sprintf(tmpstr, "ready");
        hputs(st.buf, STATUS_KEY, tmpstr);
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db, curblock);
        if (rv!=0) {
            // This is a big ol' kludge to avoid this process hanging
            // due to thread synchronization problems.
            sleep(1);
            continue;
        }

        /* Note current block */
        guppi_status_lock_safe(&st);
        hputi4(st.buf, "CURBLOCK", curblock);
        guppi_status_unlock_safe(&st);

        /* See how full databuf is */
        total_status = guppi_databuf_total_status(db);

        /* Read param structs for this block */
        ptr = guppi_databuf_header(db, curblock);
        if (firsttime) {
            guppi_read_obs_params(ptr, &gp, &pf);
            firsttime = 0;
        } else {
            guppi_read_subint_params(ptr, &gp, &pf);
        }

        /* Find out what mode this data is in */
        mode = psrfits_obs_mode(pf.hdr.obs_mode);

        /* Check if we got both packet 0 and a valid observation
         * start time.  If so, flag writing to start.
         */
        if (got_packet_0==0 && gp.packetindex==0 && gp.stt_valid==1) {
            got_packet_0 = 1;
            guppi_read_obs_params(ptr, &gp, &pf);
            guppi_update_ds_params(&pf);
            memset(pc, 0, sizeof(pc));
            n_polyco_written=0;
        }

        /* If actual observation has started, write the data */
        if (got_packet_0) {

            /* Note writing status */
            guppi_status_lock_safe(&st);
            hputs(st.buf, STATUS_KEY, "writing");
            guppi_status_unlock_safe(&st);

            /* Get the pointer to the current data */
            if (mode==FOLD_MODE) {
                fb.nchan = pf.hdr.nchan;
                fb.npol = pf.hdr.npol;
                fb.nbin = pf.hdr.nbin;
                fb.data = (float *)guppi_databuf_data(db, curblock);
                fb.count = (unsigned *)(guppi_databuf_data(db, curblock) +
                                        foldbuf_data_size(&fb));
                fold_output_array = (float *)realloc(fold_output_array,
                        sizeof(float) * pf.hdr.nbin * pf.hdr.nchan * pf.hdr.npol);
                pf.sub.data = (unsigned char *)fold_output_array;
                pf.fold.pc = (struct polyco *)(guppi_databuf_data(db,curblock) +
                                               foldbuf_data_size(&fb) +
                                               foldbuf_count_size(&fb));
            } else
                pf.sub.data = (unsigned char *)guppi_databuf_data(db, curblock);

            /* Set the DC and Nyquist channels explicitly to zero */
            /* because of the "FFT Problem" that splits DC power   */
            /* into those two bins.                                */
            zero_end_chans(&pf);

            /* Output only Stokes I (in place) */
            if (pf.hdr.onlyI && pf.hdr.npol==4)
                get_stokes_I(&pf);

            /* Downsample in frequency (in place) */
            if (pf.hdr.ds_freq_fact > 1)
                downsample_freq(&pf);

            /* Downsample in time (in place) */
            if (pf.hdr.ds_time_fact > 1)
                downsample_time(&pf);

            /* Folded data needs a transpose */
            if (mode==FOLD_MODE)
                normalize_transpose_folds(fold_output_array, &fb);

            /* Write the data */
            int last_filenum = pf.filenum;
            psrfits_write_subint(&pf);

            /* Any actions that need to be taken when a new file
             * is created.
             */
            if (pf.filenum!=last_filenum) {
                /* No polycos yet written to the new file */
                n_polyco_written=0;
            }

            /* Write the polycos if needed */
            int write_pc=0, i, j;
            for (i=0; i<pf.fold.n_polyco_sets; i++) {
                if (pf.fold.pc[i].used==0) continue;
                int new_pc=1;
                for (j=0; j<n_polyco_written; j++) {
                    if (polycos_differ(&pf.fold.pc[i], &pc[j])==0) {
                        new_pc=0;
                        break;
                    }
                }
                if (new_pc || n_polyco_written==0) {
                    pc[n_polyco_written] = pf.fold.pc[i];
                    n_polyco_written++;
                    write_pc=1;
                } else {
                    pf.fold.pc[i].used = 0;  // Already have this one
                }
            }
            if (write_pc)
                psrfits_write_polycos(&pf, pf.fold.pc, pf.fold.n_polyco_sets);

            /* Is the scan complete? */
            if ((pf.hdr.scanlen > 0.0) && (pf.T > pf.hdr.scanlen))
                scan_finished = 1;

            /* For debugging... */
            if (gp.drop_frac > 0.0) {
                printf("Block %d dropped %.3g%% of the packets\n",
                       pf.tot_rows, gp.drop_frac*100.0);
            }
        }

        /* Mark as free */
        guppi_databuf_set_free(db, curblock);

        /* Go to next block */
        curblock = (curblock + 1) % db->n_block;

        /* Check for cancel */
        pthread_testcancel();

    } while (run && !scan_finished);

    /* Cleanup */
    if (fold_output_array!=NULL) free(fold_output_array);

    pthread_exit(NULL);

    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach */
    pthread_cleanup_pop(0); /* Closes psrfits_close */
    pthread_cleanup_pop(0); /* Closes guppi_free_psrfits */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
    pthread_cleanup_pop(0); /* Closes guppi_thread_set_finished */
}
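
/*
 * Illustrative sketch (not called anywhere): the layout assumed for a
 * fold-mode data block in guppi_psrfits_thread above.  A block holds the
 * folded profile floats, then the per-bin hit counts, then the polyco set,
 * back to back.  The helper name is hypothetical; it simply restates the
 * pointer arithmetic used when setting up fb and pf.fold.pc.
 */
static void psrfits_example_fold_layout(char *block, struct foldbuf *fb,
                                        float **data, unsigned **count,
                                        struct polyco **pc)
{
    *data  = (float *)block;
    *count = (unsigned *)(block + foldbuf_data_size(fb));
    *pc    = (struct polyco *)(block + foldbuf_data_size(fb) +
                               foldbuf_count_size(fb));
}
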
void guppi_rawdisk_thread(void *_args) {

    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    CPU_ZERO(&cpuset);
    CPU_SET(1, &cpuset);
    int rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) {
        guppi_error("guppi_rawdisk_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Get args */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, 0);
    if (rv<0) {
        guppi_error("guppi_rawdisk_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_rawdisk_thread",
                    "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Read in general parameters */
    struct guppi_params gp;
#if FITS_TYPE == PSRFITS
    struct psrfits pf;
    pf.sub.dat_freqs = NULL;
    pf.sub.dat_weights = NULL;
    pf.sub.dat_offsets = NULL;
    pf.sub.dat_scales = NULL;
    pthread_cleanup_push((void *)guppi_free_psrfits, &pf);
#else
    struct sdfits pf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &pf);
#endif

    /* Attach to databuf shared mem */
    struct guppi_databuf *db;
    db = guppi_databuf_attach(args->input_buffer);
    if (db==NULL) {
        guppi_error("guppi_rawdisk_thread",
                    "Error attaching to databuf shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db);

    /* Init output file */
    FILE *fraw = NULL;
    pthread_cleanup_push((void *)safe_fclose, fraw);

    /* Pointers for quantization params */
    double *mean = NULL;
    double *std = NULL;

    printf("casper: raw disk thread created and running\n");

    /* Loop */
    int packetidx=0, npacket=0, ndrop=0, packetsize=0, blocksize=0;
    int orig_blocksize=0;
    int curblock=0;
    int block_count=0, blocks_per_file=128, filenum=0;
    int got_packet_0=0, first=1;
    int requantize = 0;
    char *ptr, *hend;
    signal(SIGINT, cc);
    while (run_threads) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db, curblock);
        if (rv!=0) continue;

        printf("casper: raw disk thread rcvd data.\n");

        /* Read param struct for this block */
        ptr = guppi_databuf_header(db, curblock);
        if (first) {
            guppi_read_obs_params(ptr, &gp, &pf);
            first = 0;
        } else {
            guppi_read_subint_params(ptr, &gp, &pf);
        }

        /* Parse packet size, npacket from header */
        hgeti4(ptr, "PKTIDX", &packetidx);
        hgeti4(ptr, "PKTSIZE", &packetsize);
        hgeti4(ptr, "NPKT", &npacket);
        hgeti4(ptr, "NDROP", &ndrop);

#if FITS_TYPE == PSRFITS
        /* Check for re-quantization flag */
        int nbits_req = 0;
        if (hgeti4(ptr, "NBITSREQ", &nbits_req) == 0) {
            /* Param not present, don't requantize */
            requantize = 0;
        } else {
            /* Param is present */
            if (nbits_req==8)
                requantize = 0;
            else if (nbits_req==2)
                requantize = 1;
            else
                /* Invalid selection for requested nbits
                 * .. die or ignore?
                 */
                requantize = 0;
        }
#endif

        /* Set up data ptr for quant routines */
#if FITS_TYPE == PSRFITS
        pf.sub.data = (unsigned char *)guppi_databuf_data(db, curblock);
#else
        pf.data_columns.data = (unsigned char *)guppi_databuf_data(db, curblock);
#endif

        /* Wait for packet 0 before starting write */
        if (got_packet_0==0 && packetidx==0 && gp.stt_valid==1) {
            got_packet_0 = 1;
            guppi_read_obs_params(ptr, &gp, &pf);
#if FITS_TYPE == PSRFITS
            orig_blocksize = pf.sub.bytes_per_subint;
#endif
            char fname[256];
            sprintf(fname, "%s.%4.4d.raw", pf.basefilename, filenum);
            fprintf(stderr, "Opening raw file '%s'\n", fname);
            // TODO: check for file exist.
            fraw = fopen(fname, "w");
            if (fraw==NULL) {
                guppi_error("guppi_rawdisk_thread", "Error opening file.");
                pthread_exit(NULL);
            }
#if FITS_TYPE == PSRFITS
            /* Determine scaling factors for quantization if appropriate */
            if (requantize) {
                mean = (double *)realloc(mean,
                        pf.hdr.rcvr_polns * pf.hdr.nchan * sizeof(double));
                std  = (double *)realloc(std,
                        pf.hdr.rcvr_polns * pf.hdr.nchan * sizeof(double));
                compute_stat(&pf, mean, std);
                fprintf(stderr, "Computed 2-bit stats\n");
            }
#endif
        }

        /* See if we need to open next file */
        if (block_count >= blocks_per_file) {
            fclose(fraw);
            filenum++;
            char fname[256];
            sprintf(fname, "%s.%4.4d.raw", pf.basefilename, filenum);
            fprintf(stderr, "Opening raw file '%s'\n", fname);
            fraw = fopen(fname, "w");
            if (fraw==NULL) {
                guppi_error("guppi_rawdisk_thread", "Error opening file.");
                pthread_exit(NULL);
            }
            block_count=0;
        }

        /* See how full databuf is */
        //total_status = guppi_databuf_total_status(db);

        /* Requantize from 8 bits to 2 bits if necessary.
         * See raw_quant.c for more usage examples.
         */
#if FITS_TYPE == PSRFITS
        if (requantize && got_packet_0) {
            pf.sub.bytes_per_subint = orig_blocksize;
            /* Does the quantization in-place */
            quantize_2bit(&pf, mean, std);
            /* Update some parameters for output */
            hputi4(ptr, "BLOCSIZE", pf.sub.bytes_per_subint);
            hputi4(ptr, "NBITS", pf.hdr.nbits);
        }
#endif

        /* Get full data block size */
        hgeti4(ptr, "BLOCSIZE", &blocksize);

        /* If we got packet 0, write data to disk */
        if (got_packet_0) {

            /* Note writing status */
            guppi_status_lock_safe(&st);
            hputs(st.buf, STATUS_KEY, "writing");
            guppi_status_unlock_safe(&st);

            /* Write header to file */
            hend = ksearch(ptr, "END");
            for ( ; ptr<=hend; ptr+=80) {
                fwrite(ptr, 80, 1, fraw);
            }

            /* Write data */
            printf("block size: %d\n", blocksize);
            ptr = guppi_databuf_data(db, curblock);
            rv = fwrite(ptr, 1, (size_t)blocksize, fraw);
            if (rv != blocksize) {
                guppi_error("guppi_rawdisk_thread", "Error writing data.");
            }

            /* Increment counter */
            block_count++;

            /* flush output */
            fflush(fraw);
        }

        /* Mark as free */
        guppi_databuf_set_free(db, curblock);

        /* Go to next block */
        curblock = (curblock + 1) % db->n_block;

        /* Check for cancel */
        pthread_testcancel();
    }

    pthread_exit(NULL);

    pthread_cleanup_pop(0); /* Closes safe_fclose */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach */
    pthread_cleanup_pop(0); /* Closes guppi_free_psrfits/guppi_free_sdfits */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
}
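
/*
 * Illustrative sketch (not called anywhere): the record format that
 * guppi_rawdisk_thread writes for each block.  The FITS-style header is
 * written as 80-character cards up to and including the END card, followed
 * by BLOCSIZE bytes of data.  The helper name is hypothetical and mirrors
 * the write loop above.
 */
static int rawdisk_example_write_block(FILE *f, char *hdr, char *data,
                                       int blocksize)
{
    char *hend = ksearch(hdr, "END");          /* last 80-char header card */
    for ( ; hdr <= hend; hdr += 80)
        if (fwrite(hdr, 80, 1, f) != 1)
            return -1;
    if (fwrite(data, 1, (size_t)blocksize, f) != (size_t)blocksize)
        return -1;
    return 0;
}
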
void guppi_null_thread(void *_args) {

    int rv;

    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    CPU_ZERO(&cpuset);
    CPU_SET(6, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) {
        guppi_error("guppi_null_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, 0);
    if (rv<0) {
        guppi_error("guppi_null_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Get args */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_null_thread",
                    "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Attach to databuf shared mem */
    struct guppi_databuf *db;
    db = guppi_databuf_attach(args->input_buffer);
    if (db==NULL) {
        guppi_error("guppi_null_thread",
                    "Error attaching to databuf shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db);

    /* Loop */
    char *ptr;
    struct guppi_params gp;
#if FITS_TYPE == PSRFITS
    struct psrfits pf;
    pf.sub.dat_freqs = NULL;
    pf.sub.dat_weights = NULL;
    pf.sub.dat_offsets = NULL;
    pf.sub.dat_scales = NULL;
    pthread_cleanup_push((void *)guppi_free_psrfits, &pf);
#else
    struct sdfits pf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &pf);
#endif
    int curblock=0;
    signal(SIGINT, cc);
    while (run_threads) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db, curblock);
        if (rv!=0) {
            //sleep(1);
            continue;
        }

        /* Note discarding status, current block */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "discarding");
        hputi4(st.buf, "DSKBLKIN", curblock);
        guppi_status_unlock_safe(&st);

        /* Get params */
        ptr = guppi_databuf_header(db, curblock);
        guppi_read_obs_params(ptr, &gp, &pf);

        /* Output if data was lost */
#if FITS_TYPE == PSRFITS
        if (gp.n_dropped!=0 &&
            (gp.packetindex==0 || strcmp(pf.hdr.obs_mode,"SEARCH"))) {
            printf("Block beginning with pktidx=%lld dropped %d packets\n",
                   gp.packetindex, gp.n_dropped);
            fflush(stdout);
        }
#else
        if (gp.num_pkts_dropped!=0 && gp.num_pkts_rcvd!=0) {
            printf("Block received %d packets and dropped %d packets\n",
                   gp.num_pkts_rcvd, gp.num_pkts_dropped);
            fflush(stdout);
        }
#endif

        /* Mark as free */
        guppi_databuf_set_free(db, curblock);

        /* Go to next block */
        curblock = (curblock + 1) % db->n_block;

        /* Check for cancel */
        pthread_testcancel();
    }

    pthread_exit(NULL);

    pthread_cleanup_pop(0); /* Closes guppi_free_psrfits/guppi_free_sdfits */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
}
void vegas_pfb_thread(void *_args) {

    /* Get args */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;
    int rv;

    /* Set cpu affinity: start from the current mask so cpuset is
     * initialised before it is modified */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    cpuset = cpuset_orig;
    CPU_CLR(13, &cpuset);
    CPU_SET(11, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) {
        guppi_error("vegas_pfb_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        guppi_error("vegas_pfb_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("vegas_pfb_thread",
                    "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);
    pthread_cleanup_push((void *)guppi_thread_set_finished, args);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Init structs */
    struct guppi_params gp;
    struct sdfits sf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &sf);

    /* Attach to databuf shared mem */
    struct guppi_databuf *db_in, *db_out;
    db_in = guppi_databuf_attach(args->input_buffer);
    if (db_in==NULL) {
        char msg[256];
        sprintf(msg, "Error attaching to databuf(%d) shared memory.",
                args->input_buffer);
        guppi_error("vegas_pfb_thread", msg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_in);

    db_out = guppi_databuf_attach(args->output_buffer);
    if (db_out==NULL) {
        char msg[256];
        sprintf(msg, "Error attaching to databuf(%d) shared memory.",
                args->output_buffer);
        guppi_error("vegas_pfb_thread", msg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_out);

    /* Loop */
    char *hdr_in = NULL;
    int curblock_in=0;
    int first=1;
    int acc_len = 0;
    int nchan = 0;
    int nsubband = 0;
    signal(SIGINT, cc);

    guppi_status_lock_safe(&st);
    if (hgeti4(st.buf, "NCHAN", &nchan)==0) {
        fprintf(stderr, "ERROR: %s not in status shm!\n", "NCHAN");
    }
    if (hgeti4(st.buf, "NSUBBAND", &nsubband)==0) {
        fprintf(stderr, "ERROR: %s not in status shm!\n", "NSUBBAND");
    }
    guppi_status_unlock_safe(&st);

    if (EXIT_SUCCESS != init_gpu(db_in->block_size, db_out->block_size,
                                 nsubband, nchan)) {
        (void) fprintf(stderr, "ERROR: GPU initialisation failed!\n");
        run = 0;
    }

    while (run) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db_in, curblock_in);
        if (rv!=0) continue;

        /* Note processing status, current input block */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "processing");
        hputi4(st.buf, "PFBBLKIN", curblock_in);
        guppi_status_unlock_safe(&st);

        hdr_in = guppi_databuf_header(db_in, curblock_in);

        /* Get params */
        if (first) {
            guppi_read_obs_params(hdr_in, &gp, &sf);
            /* Read required exposure from status shared memory, and
               calculate the corresponding accumulation length */
            acc_len = (sf.hdr.chan_bw * sf.hdr.hwexposr);
        }
        guppi_read_subint_params(hdr_in, &gp, &sf);

        /* Call PFB function */
        do_pfb(db_in, curblock_in, db_out, first, st, acc_len);

        /* Mark input block as free */
        guppi_databuf_set_free(db_in, curblock_in);
        /* Go to next input block */
        curblock_in = (curblock_in + 1) % db_in->n_block;

        /* Check for cancel */
        pthread_testcancel();

        if (first) {
            first=0;
        }
    }
    run=0;

    //cudaThreadExit();
    cleanup_gpu();

    pthread_exit(NULL);

    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach(out) */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach(in) */
    pthread_cleanup_pop(0); /* Closes guppi_free_sdfits */
    pthread_cleanup_pop(0); /* Closes guppi_thread_set_finished */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
}
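
/*
 * Illustrative sketch (not called anywhere): the accumulation-length
 * arithmetic used in vegas_pfb_thread.  Assuming chan_bw is the per-channel
 * bandwidth in Hz (i.e. the PFB spectrum rate) and hwexposr is the requested
 * hardware exposure in seconds, their product is the number of spectra
 * folded into one hardware accumulation.  The helper name is hypothetical.
 */
static int pfb_example_acc_len(double chan_bw, double hwexposr)
{
    return (int)(chan_bw * hwexposr);
}
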