Example 1
// test inputs
int test1(int argc, char **argv){
	int i, j;

	hputs("Lecture des entrees...\n");

	MYFTPP_init();

	MYFTPP_runIOThread();
	printf("  ");
	for (j=1; j<=8; j++)
		printf(" |  E%d", j);
	printf("\n");
	for (j=0; j<50; j++)
		printf("-");
	printf("\n");
	for (i=0; i<25; i++){
		usleep(100000);
		printf("%2d", i);
		for (j=1; j<=8; j++){
			if (MYFTPP_ISON_E(j))
				printf(" | \033[32mON!\033[0m");
			else
				printf(" | \033[31mOFF\033[0m");
		}
		printf("\n");
	}
	MYFTPP_setMotors(OFF, OFF, OFF, OFF);
	MYFTPP_stopIOThread();
	MYFTPP_exit();

	return 0;
}
Example 2
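/*
 * Create and boot a guest partition from a loaded image: read the
 * partition-info magic, call hcall_create_partition(), set up the vty and
 * scheduler slot, then register and start the partition and yield to it.
 * Returns the new lpid, or 0 if the partition could not be started.
 */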
static uval
launch_partition(struct partition_status *ps, struct load_file_entry* lf,
		 uval ofd)
{
	uval ret[1];
	uval rc;
	const uval defslot = 0x1;
	uval schedvals[3];
	sval rot;

	uval magic = lf->loaderSegmentTable[0].file_start;
	magic += 0x10;

	if (*(uval64*)magic == HYPE_PARTITION_INFO_MAGIC_NUMBER) {
		magic = *((uval*)(magic + 8));
	} else {
		magic = INVALID_LOGICAL_ADDRESS;
	}

	hputs("Creating partition\n");
	rc = hcall_create_partition(ret, ps->init_mem, ps->init_mem_size,
				    magic);
	assert(rc == H_Success,
	       "hcall_create_partition() failed rc= 0x%lx\n", rc);

	hprintf("Created partition  ID %ld\n"
		"  starting it @ pc = 0x%llx.\n",
		ret[0], lf->va_entry);
	ps->lpid = ret[0];

	vty_setup(ps);

	rot = hcall_set_sched_params(schedvals, ps->lpid,
				     THIS_CPU, defslot, 0);
	assert(rot >= 0, "set_sched failed\n");
	hprintf("Scheduled 0x%lx: rot: %ld (0x%016lx, 0x%016lx 0x%016lx)\n",
		ps->lpid, rot, schedvals[0], schedvals[1], schedvals[2]);


	curslots = schedvals[0];
	ps->slot = schedvals[2];

	register_partition(ps, ofd);

	if (!start_partition(ps, lf, ofd)) {
		return 0;
	}

	hprintf("%s: yielding to sysId: 0x%lx now\n", __func__, ps->lpid);
	hcall_yield(NULL, ps->lpid);
	hprintf("%s: back from yielding to sysId: 0x%lx\n",
		__func__, ps->lpid);


	return ps->lpid;
}
Example 3
static void
ask_destroy_partition(void)
{
	char inbuf[17];
	uval sz;
	int done = 0;
	int i;

	while (!done) {
		uval val;
		hputs("Choose the partition to destroy\n");
		/* list active partitions */
		for (i = 0; i < MAX_MANAGED_PARTITIONS; ++i) {
			if (partitions[i].active == 1) {
				hprintf("  %02d: 0x%lx: %s\n", i,
					partitions[i].lpid,
					partitions[i].name);
			}
		}

		hputs("  e: exit this menu\n");
		hputs("Choice: ");

		sz = get_input(inbuf);
		inbuf[16] = '\0';
		if (sz == 0) continue;

		if (inbuf[0] == 'e') {
			/* exit the destroy menu */
			return;
		}

		val = strtoul(inbuf, NULL, 0);
		if (!((val < MAX_MANAGED_PARTITIONS) &&
		      (partitions[val].active == 1))) {
			hprintf("0x%lx is not a valid choice\n", val);
			continue;
		}
		destroy_partition(val);

	}
}
Example 4
void usage(int ret)
{
	FILE *stream = (ret ? stderr : stdout);
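	/* Locally shadow hputs() with a macro that appends a newline and writes to the chosen stream. */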
#define hputs(x) fputs(x"\n", stream)
	hputs("pacini - query pacman-style configuration file");
	hputs("usage:  pacini [options] <file> [<directive>...]");
	hputs("        pacini (--section-list|--help|--version)");
	hputs("options:");
	hputs("  --section=<name>  query options for a specific section");
	hputs("  --verbose         always show directive names");
	hputs("  --section-list    list configured sections");
	hputs("  --help            display this help information");
	hputs("  --version         display version information");
#undef hputs
	cleanup();
	exit(ret);
}
Example 5
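/*
 * GCC -finstrument-functions exit hook: when tracing is enabled, format the
 * call depth, counters, and function/call-site pointers and emit the line
 * with hputs().
 */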
void
__cyg_profile_func_exit(void *this_fn, void *call_site)
{
	prof_exit_cnt++;
	if (trace_status == trace_enabled) {
		snprintf(buf, sizeof (buf) - 1,
			 "func_exit [%d] %ld %p:%p\n",
			 call_depth--, prof_cnt1++, this_fn, call_site);
		hputs(buf);
	}
	prof_exit_cnt--;
}
Example 6
// test outputs
int test2(int argc, char **argv){
	int i;
	hputs("Allumage des moteurs...\n");
	MYFTPP_runIOThread();
	MYFTPP_MM(MYFTPP_M_CW, MYFTPP_M_CW, MYFTPP_M_CW, MYFTPP_M_CW);
	for (i=1; i<=5; i++){
		printf("%d\n", i);
		usleep(1000000);
	}
	MYFTPP_MM(MYFTPP_M_OFF, MYFTPP_M_OFF, MYFTPP_M_OFF, MYFTPP_M_OFF);
	MYFTPP_stopIOThread();
	return 0;
}
Example 7
void
assprint(const char *expr, const char *file, int line, const char *fmt, ...)
{
	va_list ap;

	/* compiler-like "file:line:" output */
	hprintf("%s:%d: ASSERT(%s)\n", file, line, expr);

	va_start(ap, fmt);
	vhprintf(fmt, ap);
	va_end(ap);
	hputs("\n");

	breakpoint();
}
Example 8
File: ofd.c Project: jiamacs/rhype
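/*
 * Populate the /chosen node of the device tree: append the console string to
 * default_bootargs, add the bootargs and bootpath properties, and remove any
 * existing mmu property so the stub can replace it.
 */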
static ofdn_t
ofd_chosen_props(void *m)
{
	ofdn_t n;
	ofdn_t p;
	static const char path[] = "/chosen";
	static const char console[] = " console=hvc0 nosmp";
	char b[257];
	uval sz = 0;

//		" root=/dev/hda "
//		" rootfstype=ramfs "
//		"init=/bin/sh "
//		"root=/dev/nfsroot "
//		"nfsroot=9.2.208.21:/,rsize=1024,wsize=1024 "
//		"ether=0,0,eth0 "
//nfsaddrs=<wst-IP>  :<srv-IP>  :<gw-IP>  :<netm-IP>    :<hostname>
//"nfsaddrs=9.2.208.161:9.2.208.21:9.2.208.2:255.255.248.0:freakazoid "
//"nfsaddrs=9.2.208.161:9.2.208.21:9.2.208.2:255.255.248.0:freakazoid:eth0 "

	n = ofd_node_find(m, path);
	if (n == 0) {
		n = ofd_node_add(m, OFD_ROOT, path, sizeof (path));
		ofd_prop_add(m, n, "name",
				&path[1], sizeof (path) - 1);
	}

	sz = strlen(default_bootargs);
	memcpy(b, default_bootargs, sz);

	assert(sz + sizeof(console) <= 256,
	       "boot args not big enough\n");

	memcpy(b + sz, console, sizeof (console));
	sz += sizeof (console);

	ofd_prop_add(m, n, "bootargs", b, sz);

	ofd_prop_add(m, n, "bootpath", NULL, 0);

	hputs("Remove /chosen/mmu, stub will replace\n");
	p = ofd_prop_find(m, n, "mmu");
	if (p > 0) {
		ofd_prop_remove(m, n, p);
	}

	return n;
}
Example 9
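/* Write one tab-separated digest line (key, set, count, and four checksums) to f. */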
static void print_dline(hFILE *f, char *key, digest_line_t *dline, int p)
{
    hputs(key, f); hputc('\t',f); 
    hputs((p ? "pass": "******"), f); hputc('\t',f);
    hputi(dline->count[p], f); hputc('\t',f);
    hputc('\t',f);
    hputs(dformat(dline->chksum[0][p]), f); hputc('\t',f);
    hputs(dformat(dline->chksum[1][p]), f); hputc('\t',f);
    hputs(dformat(dline->chksum[2][p]), f); hputc('\t',f);
    hputs(dformat(dline->chksum[3][p]), f); hputc('\n',f);
}
Example 10
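/* Print the digest table: a header line, the "all" totals for both sets, then one pair of lines per read group in rgHash. */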
void chksum_print_results(hFILE *f, chksum_results_t *results)
{
    digest_line_t *dline = &(results->all);

    hputs("###\tset\tcount\t\tb_seq\tname_b_seq\tb_seq_qual\tb_seq_tags(BC,FI,QT,RT,TC)\n", f);

    print_dline(f, "all", dline, 0);
    print_dline(f, "all", dline, 1);

    HashIter *iter = HashTableIterCreate();
    HashItem *hi;
    while ( (hi = HashTableIterNext(results->rgHash, iter)) != NULL) {
        print_dline(f, hi->key, hi->data.p, 0);
        print_dline(f, hi->key, hi->data.p, 1);
    }
    HashTableIterDestroy(iter);
}
Example 11
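/*
 * Test OS entry point: record our lpid, start another hardware thread from
 * thread 0, then loop forever printing the thread id and an incrementing
 * counter.
 */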
uval
test_os(void)
{
	reg_t mylpid;
	reg_t regsave[32];
	int i=0;
	(void)hcall_get_lpid(&mylpid);

	ctxt_count = ((uval64)mylpid) << 32;
	hputs("You should see repeated 'SUCCESS' messages.\n"
	      "This test will run until manually halted.\n");
	if (mfpir() == 0)
	    hcall_thread_control(regsave, HTC_START, 1, 0);
	while (1) {
	    hprintf("multi: %d:%d\n", mfpir(), i);
	    ++i;
	}

	return 0;
}
Example 12
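/*
 * Ruby binding: write string vval under key vkey into the attached hashpipe
 * status buffer via hputs(), truncating the key to 8 characters (with a
 * warning) and raising RuntimeError if the header length would be exceeded.
 */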
VALUE rb_hps_hputs(VALUE self, VALUE vkey, VALUE vval)
{
  int rc;
  hashpipe_status_t *s;
  char save = '\0';
  const char * val = StringValueCStr(vval);
  char * key = StringValueCStr(vkey);
  if(strlen(key) > 8) {
    rb_warning("key '%s' truncated to 8 characters", key);
    save = key[8];
    key[8] = '\0';
  }
  Data_Get_HPStruct_Ensure_Attached(self, s);
  rc = hputs(s->buf, key, val);
  if(save) key[8] = save;
  if(rc)
    // Currently, the only error return is if header length is exceeded
    rb_raise(rb_eRuntimeError, "header length exceeded");

  return self;
}
Example 13
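/*
 * Demo producer thread: wait for a free block in the output databuf (noting
 * "blocked_in" in the status buffer on timeout), write one byte derived from
 * a running counter, mark the block filled, and advance to the next block.
 */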
static void *run_method(hashpipe_thread_args_t * args)
{
	int rv = 0;
	easy_in_output_databuf_t *db_out = (easy_in_output_databuf_t *)args->obuf;
	hashpipe_status_t st = args->st;
	const char * status_key = args->thread_desc->skey;
	
	int idx_data = 0;
	char data = 'a';
	while (run_threads())
	{
		while ((rv=hashpipe_databuf_wait_free((hashpipe_databuf_t *)db_out, idx_data)) != HASHPIPE_OK) {
			if (rv==HASHPIPE_TIMEOUT) {
				hashpipe_status_lock_safe(&st);
				hputs(st.buf, status_key, "blocked_in");
				hashpipe_status_unlock_safe(&st);
				continue;
			} else {
				hashpipe_error(__FUNCTION__, "error waiting for free databuf");
				pthread_exit(NULL);
				break;
			}
		}
		#ifdef DEBUG
			fprintf(stdout,"easy_in_thread:\n");
			fprintf(stdout,"\tcount = %d\n",db_out->count);
			fprintf(stdout,"\tdata[%d] = %c\n",idx_data,'a' + (char)(db_out->count % 26));
		#endif
		db_out->data[idx_data] = 'a' + (char)(db_out->count % 26);
		db_out->count++;
		hashpipe_databuf_set_filled((hashpipe_databuf_t *)db_out, idx_data);
		idx_data = (idx_data + 1) % db_out->header.n_block;
		
		pthread_testcancel();
	}
	// Thread success!
	return NULL;
}
Example 14
/* The main CPU accumulator thread */
void guppi_accum_thread(void *_args) {

    float **accumulator;      //indexed accumulator[accum_id][chan][subband][stokes]
    char accum_dirty[NUM_SW_STATES];
    struct sdfits_data_columns data_cols[NUM_SW_STATES];
    int payload_type;
    int i, j, k, rv;

    /* Get arguments */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;

    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    //CPU_ZERO(&cpuset);
    CPU_CLR(13, &cpuset);
    CPU_SET(9, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) { 
        guppi_error("guppi_accum_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        guppi_error("guppi_accum_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_accum_thread", 
                "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);
    pthread_cleanup_push((void *)guppi_thread_set_finished, args);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Read in general parameters */
    struct guppi_params gp;
    struct sdfits sf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &sf);

    /* Attach to databuf shared mem */
    struct guppi_databuf *db_in, *db_out;
    db_in = guppi_databuf_attach(args->input_buffer);
    char errmsg[256];
    if (db_in==NULL) {
        sprintf(errmsg,
                "Error attaching to input databuf(%d) shared memory.", 
                args->input_buffer);
        guppi_error("guppi_accum_thread", errmsg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_in);
    db_out = guppi_databuf_attach(args->output_buffer);
    if (db_out==NULL) {
        sprintf(errmsg,
                "Error attaching to output databuf(%d) shared memory.", 
                args->output_buffer);
        guppi_error("guppi_accum_thread", errmsg);
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db_out);

    /* Determine high/low bandwidth mode */
    char bw_mode[16];
    if (hgets(st.buf, "BW_MODE", 16, bw_mode))
    {
        if(strncmp(bw_mode, "high", 4) == 0)
            payload_type = INT_PAYLOAD;
        else if(strncmp(bw_mode, "low", 3) == 0)
            payload_type = FLOAT_PAYLOAD;
        else
            guppi_error("guppi_net_thread", "Unsupported bandwidth mode");
    }
    else
        guppi_error("guppi_net_thread", "BW_MODE not set");

    /* Read nchan and nsubband from status shared memory */
    guppi_read_obs_params(st.buf, &gp, &sf);

    /* Allocate memory for vector accumulators */
    create_accumulators(&accumulator, sf.hdr.nchan, sf.hdr.nsubband);
    pthread_cleanup_push((void *)destroy_accumulators, accumulator);

    /* Clear the vector accumulators */
    for(i = 0; i < NUM_SW_STATES; i++) accum_dirty[i] = 1;
    reset_accumulators(accumulator, data_cols, accum_dirty, sf.hdr.nsubband, sf.hdr.nchan);

    /* Loop */
    int curblock_in=0, curblock_out=0;
    int first=1;
    float reqd_exposure=0;
    double accum_time=0;
    int integ_num = 0;
    float pfb_rate;
    int heap, accumid, struct_offset, array_offset;
    char *hdr_in=NULL, *hdr_out=NULL;
    struct databuf_index *index_in, *index_out;

    int nblock_int=0, npacket=0, n_pkt_drop=0, n_heap_drop=0;

    signal(SIGINT,cc);
    while (run) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db_in, curblock_in);
        if (rv!=0) continue;

        /* Note waiting status and current block*/
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "accumulating");
        hputi4(st.buf, "ACCBLKIN", curblock_in);
        guppi_status_unlock_safe(&st);

        /* Read param struct for this block */
        hdr_in = guppi_databuf_header(db_in, curblock_in);
        if (first) 
            guppi_read_obs_params(hdr_in, &gp, &sf);
        else
            guppi_read_subint_params(hdr_in, &gp, &sf);

        /* Do any first time stuff: first time code runs, not first time process this block */
        if (first) {

            /* Set up first output header. This header is copied from block to block
               each time a new block is created */
            hdr_out = guppi_databuf_header(db_out, curblock_out);
            memcpy(hdr_out, guppi_databuf_header(db_in, curblock_in),
                    GUPPI_STATUS_SIZE);

            /* Read required exposure and PFB rate from status shared memory */
            reqd_exposure = sf.data_columns.exposure;
            pfb_rate = sf.hdr.efsampfr / (2 * sf.hdr.nchan);

            /* Initialise the index in the output block */
            index_out = (struct databuf_index*)guppi_databuf_index(db_out, curblock_out);
            index_out->num_datasets = 0;
            index_out->array_size = sf.hdr.nsubband * sf.hdr.nchan * NUM_STOKES * 4;

            first=0;
        }

        /* Loop through each spectrum (heap) in input buffer */
        index_in = (struct databuf_index*)guppi_databuf_index(db_in, curblock_in);

        for(heap = 0; heap < index_in->num_heaps; heap++)
        {
            /* If invalid, record it and move to next heap */
            if(!index_in->cpu_gpu_buf[heap].heap_valid)
            {
                n_heap_drop++;
                continue;
            }

            /* Read in heap from buffer */
            char* heap_addr = (char*)(guppi_databuf_data(db_in, curblock_in) +
                                sizeof(struct freq_spead_heap) * heap);
            struct freq_spead_heap* freq_heap = (struct freq_spead_heap*)(heap_addr);

            char* payload_addr = (char*)(guppi_databuf_data(db_in, curblock_in) +
                                sizeof(struct freq_spead_heap) * MAX_HEAPS_PER_BLK +
                                (index_in->heap_size - sizeof(struct freq_spead_heap)) * heap );
            int *i_payload = (int*)(payload_addr);
            float *f_payload = (float*)(payload_addr);

            accumid = freq_heap->status_bits & 0x7;         

            /*Debug: print heap */
/*            printf("%d, %d, %d, %d, %d, %d\n", freq_heap->time_cntr, freq_heap->spectrum_cntr,
                freq_heap->integ_size, freq_heap->mode, freq_heap->status_bits,
                freq_heap->payload_data_off);
*/

            /* If we have accumulated for long enough, write vectors to output block */
            if(accum_time >= reqd_exposure)
            {
                for(i = 0; i < NUM_SW_STATES; i++)
                {
                    /*If a particular accumulator is dirty, write it to output buffer */
                    if(accum_dirty[i])
                    {
                        /*If insufficient space, first mark block as filled and request new block*/
                        index_out = (struct databuf_index*)(guppi_databuf_index(db_out, curblock_out));

                        if( (index_out->num_datasets+1) *
                            (index_out->array_size + sizeof(struct sdfits_data_columns)) > 
                            db_out->block_size)
                        {
                            printf("Accumulator finished with output block %d\n", curblock_out);

                            /* Write block number to status buffer */
                            guppi_status_lock_safe(&st);
                            hputi4(st.buf, "ACCBLKOU", curblock_out);
                            guppi_status_unlock_safe(&st);

                            /* Update packet count and loss fields in output header */
                            hputi4(hdr_out, "NBLOCK", nblock_int);
                            hputi4(hdr_out, "NPKT", npacket);
                            hputi4(hdr_out, "NPKTDROP", n_pkt_drop);
                            hputi4(hdr_out, "NHPDROP", n_heap_drop);

                            /* Close out current integration */
                            guppi_databuf_set_filled(db_out, curblock_out);

                            /* Wait for next output buf */
                            curblock_out = (curblock_out + 1) % db_out->n_block;

                            while ((rv=guppi_databuf_wait_free(db_out, curblock_out)) != GUPPI_OK)
                            {
                                if (rv==GUPPI_TIMEOUT) {
                                    guppi_warn("guppi_accum_thread", "timeout while waiting for output block");
                                    continue;
                                } else {
                                    guppi_error("guppi_accum_thread", "error waiting for free databuf");
                                    run=0;
                                    pthread_exit(NULL);
                                    break;
                                }
                            }

                            hdr_out = guppi_databuf_header(db_out, curblock_out);
                            memcpy(hdr_out, guppi_databuf_header(db_in, curblock_in),
                                    GUPPI_STATUS_SIZE);

                            /* Initialise the index in new output block */
                            index_out = (struct databuf_index*)guppi_databuf_index(db_out, curblock_out);
                            index_out->num_datasets = 0;
                            index_out->array_size = sf.hdr.nsubband * sf.hdr.nchan * NUM_STOKES * 4;
                            
                            nblock_int=0;
                            npacket=0;
                            n_pkt_drop=0;
                            n_heap_drop=0;
                        }            

                        /*Update index for output buffer*/
                        index_out = (struct databuf_index*)(guppi_databuf_index(db_out, curblock_out));

                        if(index_out->num_datasets == 0)
                            struct_offset = 0;
                        else
                            struct_offset = index_out->disk_buf[index_out->num_datasets-1].array_offset +
                                            index_out->array_size;

                        array_offset =  struct_offset + sizeof(struct sdfits_data_columns);
                        index_out->disk_buf[index_out->num_datasets].struct_offset = struct_offset;
                        index_out->disk_buf[index_out->num_datasets].array_offset = array_offset;

                        /*Copy sdfits_data_columns struct to disk buffer */
                        memcpy(guppi_databuf_data(db_out, curblock_out) + struct_offset,
                                &data_cols[i], sizeof(struct sdfits_data_columns));

                        /*Copy data array to disk buffer */
                        memcpy(guppi_databuf_data(db_out, curblock_out) + array_offset,
                                accumulator[i], index_out->array_size);
                        
                        /*Update SDFITS data_columns pointer to data array */
                        ((struct sdfits_data_columns*)
                        (guppi_databuf_data(db_out, curblock_out) + struct_offset))->data = 
                        (unsigned char*)(guppi_databuf_data(db_out, curblock_out) + array_offset);

                        index_out->num_datasets = index_out->num_datasets + 1;
                    }
                
                }

                accum_time = 0;
                integ_num += 1;

                reset_accumulators(accumulator, data_cols, accum_dirty,
                                sf.hdr.nsubband, sf.hdr.nchan);
            }

            /* Only add spectrum to accumulator if blanking bit is low */
            if((freq_heap->status_bits & 0x08) == 0)
            {
                /* Fill in data columns header fields */
                if(!accum_dirty[accumid])
                {
                    /*Record SPEAD header fields*/
                    data_cols[accumid].time = index_in->cpu_gpu_buf[heap].heap_rcvd_mjd;
                    data_cols[accumid].time_counter = freq_heap->time_cntr;
                    data_cols[accumid].integ_num = integ_num;
                    data_cols[accumid].sttspec = freq_heap->spectrum_cntr;
                    data_cols[accumid].accumid = accumid;

                    /* Fill in rest of fields from status buffer */
                    strcpy(data_cols[accumid].object, sf.data_columns.object);
                    data_cols[accumid].azimuth = sf.data_columns.azimuth;
                    data_cols[accumid].elevation = sf.data_columns.elevation;
                    data_cols[accumid].bmaj = sf.data_columns.bmaj;
                    data_cols[accumid].bmin = sf.data_columns.bmin;
                    data_cols[accumid].bpa = sf.data_columns.bpa;
                    data_cols[accumid].centre_freq_idx = sf.data_columns.centre_freq_idx;
                    data_cols[accumid].ra = sf.data_columns.ra;
                    data_cols[accumid].dec = sf.data_columns.dec;
                    data_cols[accumid].exposure = 0.0;

                    for(i = 0; i < NUM_SW_STATES; i++)
                        data_cols[accumid].centre_freq[i] = sf.data_columns.centre_freq[i];

                    accum_dirty[accumid] = 1;
                }

                data_cols[accumid].exposure += (float)(freq_heap->integ_size)/pfb_rate;
                data_cols[accumid].stpspec = freq_heap->spectrum_cntr;

                /* Add spectrum to appropriate vector accumulator (high-bw mode) */
                if(payload_type == INT_PAYLOAD)
                {
                    for(i = 0; i < sf.hdr.nchan; i++)
                    {
                        for(j = 0; j < sf.hdr.nsubband; j++)
                        {
                            for(k = 0; k < NUM_STOKES; k++)
                            {
                                accumulator[accumid]
                                           [i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k] +=
                                    (float)i_payload[i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k];
                            }
                        }
                    }
                }

                /* Add spectrum to appropriate vector accumulator (low-bw mode) */
                else
                {
                    for(i = 0; i < sf.hdr.nchan; i++)
                    {
                        for(j = 0; j < sf.hdr.nsubband; j++)
                        {
                            for(k = 0; k < NUM_STOKES; k++)
                            {
                                accumulator[accumid]
                                           [i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k] +=
                                    f_payload[i*sf.hdr.nsubband*NUM_STOKES + j*NUM_STOKES + k];
                            }
                        }
                    }
                }

            }
            
            accum_time += (double)freq_heap->integ_size / pfb_rate;
        }

        /* Update packet count and loss fields from input header */
        nblock_int++;
        npacket += gp.num_pkts_rcvd;
        n_pkt_drop += gp.num_pkts_dropped;

        /* Done with current input block */
        guppi_databuf_set_free(db_in, curblock_in);
        curblock_in = (curblock_in + 1) % db_in->n_block;

        /* Check for cancel */
        pthread_testcancel();
    }

    pthread_exit(NULL);
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes set_finished */
    pthread_cleanup_pop(0); /* Closes guppi_free_sdfits */
    pthread_cleanup_pop(0); /* Closes ? */
    pthread_cleanup_pop(0); /* Closes destroy_accumulators */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach */
}
Example 15
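/* Format integer n as decimal text and write it to hFILE f via hputs(). */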
static void hputi(int n, hFILE *f)
{
    char b[64];
    sprintf(b,"%d",n);
    hputs(b,f);
}
Example 16
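/*
 * Command-line utility: attach to the VEGAS status shared memory, then get,
 * set, or delete keywords according to the options, optionally printing or
 * clearing the whole buffer.
 */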
int main(int argc, char *argv[]) {

    int rv;
    struct vegas_status s;

    rv = vegas_status_attach(&s);
    if (rv!=VEGAS_OK) {
        fprintf(stderr, "Error connecting to shared mem.\n");
        perror(NULL);
        exit(1);
    }

    vegas_status_lock(&s);

    /* Loop over cmd line to fill in params */
    static struct option long_opts[] = {
        {"key",    1, NULL, 'k'},
        {"get",    1, NULL, 'g'},
        {"string", 1, NULL, 's'},
        {"float",  1, NULL, 'f'},
        {"double", 1, NULL, 'd'},
        {"int",    1, NULL, 'i'},
        {"quiet",  0, NULL, 'q'},
        {"clear",  0, NULL, 'C'},
        {"del",    0, NULL, 'D'},
        {0,0,0,0}
    };
    int opt,opti;
    char *key=NULL;
    float flttmp;
    double dbltmp;
    int inttmp;
    int quiet=0, clear=0;
    while ((opt=getopt_long(argc,argv,"k:g:s:f:d:i:qCD",long_opts,&opti))!=-1) {
        switch (opt) {
            case 'k':
                key = optarg;
                break;
            case 'g':
                hgetr8(s.buf, optarg, &dbltmp);
                printf("%g\n", dbltmp);
                break;
            case 's':
                if (key) 
                    hputs(s.buf, key, optarg);
                break;
            case 'f':
                flttmp = atof(optarg);
                if (key) 
                    hputr4(s.buf, key, flttmp);
                break;
            case 'd':
                dbltmp = atof(optarg);
                if (key) 
                    hputr8(s.buf, key, dbltmp);
                break;
            case 'i':
                inttmp = atoi(optarg);
                if (key) 
                    hputi4(s.buf, key, inttmp);
                break;
            case 'D':
                if (key)
                    hdel(s.buf, key);
                break;
            case 'C':
                clear=1;
                break;
            case 'q':
                quiet=1;
                break;
            default:
                break;
        }
    }

    /* If not quiet, print out buffer */
    if (!quiet) { 
        printf("%s\n", s.buf);
    }

    vegas_status_unlock(&s);

    if (clear) 
        vegas_status_clear(&s);

    exit(0);
}
Example 17
int main(int argc, char *argv[]) {

    int instance_id = 0;
    hashpipe_status_t *s;

    /* Loop over cmd line to fill in params */
    static struct option long_opts[] = {
        {"help",   0, NULL, 'h'},
        {"shmkey", 1, NULL, 'K'},
        {"key",    1, NULL, 'k'},
        {"get",    1, NULL, 'g'},
        {"string", 1, NULL, 's'},
        {"float",  1, NULL, 'f'},
        {"double", 1, NULL, 'd'},
        {"int",    1, NULL, 'i'},
        {"verbose",  0, NULL, 'v'},
        {"clear",  0, NULL, 'C'},
        {"del",    0, NULL, 'D'},
        {"query",  1, NULL, 'Q'},
        {"instance", 1, NULL, 'I'},
        {0,0,0,0}
    };
    int opt,opti;
    char *key=NULL;
    char value[81];
    float flttmp;
    double dbltmp;
    int inttmp;
    int verbose=0, clear=0;
    char keyfile[1000];
    while ((opt=getopt_long(argc,argv,"hk:g:s:f:d:i:vCDQ:K:I:",long_opts,&opti))!=-1) {
        switch (opt) {
            case 'K': // Keyfile
                snprintf(keyfile, sizeof(keyfile), "HASHPIPE_KEYFILE=%s", optarg);
                keyfile[sizeof(keyfile)-1] = '\0';
                putenv(keyfile);
                break;
            case 'I':
                instance_id = atoi(optarg);
                break;
            case 'k':
                key = optarg;
                break;
            case 'Q':
                s = get_status_buffer(instance_id);
                hashpipe_status_lock(s);
                hgets(s->buf, optarg, 80, value);
                hashpipe_status_unlock(s);
                value[80] = '\0';
                printf("%s\n", value);
                break;
            case 'g':
                s = get_status_buffer(instance_id);
                hashpipe_status_lock(s);
                hgetr8(s->buf, optarg, &dbltmp);
                hashpipe_status_unlock(s);
                printf("%g\n", dbltmp);
                break;
            case 's':
                if (key) {
                    s = get_status_buffer(instance_id);
                    hashpipe_status_lock(s);
                    hputs(s->buf, key, optarg);
                    hashpipe_status_unlock(s);
                }
                break;
            case 'f':
                flttmp = atof(optarg);
                if (key) {
                    s = get_status_buffer(instance_id);
                    hashpipe_status_lock(s);
                    hputr4(s->buf, key, flttmp);
                    hashpipe_status_unlock(s);
                }
                break;
            case 'd':
                dbltmp = atof(optarg);
                if (key) {
                    s = get_status_buffer(instance_id);
                    hashpipe_status_lock(s);
                    hputr8(s->buf, key, dbltmp);
                    hashpipe_status_unlock(s);
                }
                break;
            case 'i':
                inttmp = atoi(optarg);
                if (key) {
                    s = get_status_buffer(instance_id);
                    hashpipe_status_lock(s);
                    hputi4(s->buf, key, inttmp);
                    hashpipe_status_unlock(s);
                }
                break;
            case 'D':
                if (key) {
                    s = get_status_buffer(instance_id);
                    hashpipe_status_lock(s);
                    hdel(s->buf, key);
                    hashpipe_status_unlock(s);
                }
                break;
            case 'C':
                clear=1;
                break;
            case 'v':
                verbose=1;
                break;
            case 'h':
                usage();
                return 0;
            case '?': // Command line parsing error
            default:
                usage();
                exit(1);
                break;
        }
    }

    s = get_status_buffer(instance_id);

    /* If verbose, print out buffer */
    if (verbose) { 
        hashpipe_status_lock(s);
        printf("%s\n", s->buf);
        hashpipe_status_unlock(s);
    }

    if (clear) 
        hashpipe_status_clear(s);

    exit(0);
}
Example 18
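/*
 * Pipeline launcher: mark the bandwidth mode in status memory, create or
 * attach the net/PFB/accumulator/disk data buffers, spawn the four
 * processing threads, wait for the disk thread to finish, then cancel and
 * join everything.
 */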
int main(int argc, char *argv[]) {

    /* thread args */
    struct guppi_thread_args net_args, pfb_args, accum_args, disk_args;
    guppi_thread_args_init(&net_args);
    guppi_thread_args_init(&pfb_args);
    guppi_thread_args_init(&accum_args);
    guppi_thread_args_init(&disk_args);

    net_args.output_buffer = 1;
    pfb_args.input_buffer = net_args.output_buffer;
    pfb_args.output_buffer = 2;
    accum_args.input_buffer = pfb_args.output_buffer;
    accum_args.output_buffer = 3;
    disk_args.input_buffer = accum_args.output_buffer;

    /* Init status shared mem */
    struct guppi_status stat;
    int rv = guppi_status_attach(&stat);
    if (rv!=GUPPI_OK) {
        fprintf(stderr, "Error connecting to guppi_status\n");
        exit(1);
    }

    hputs(stat.buf, "BW_MODE", "low");
    hputs(stat.buf, "SWVER", SWVER);

    /* Init first shared data buffer */
    struct guppi_databuf *gpu_input_dbuf=NULL;
    gpu_input_dbuf = guppi_databuf_attach(pfb_args.input_buffer);

    /* If attach fails, first try to create the databuf */
    if (gpu_input_dbuf==NULL) 
        gpu_input_dbuf = guppi_databuf_create(24, 32*1024*1024,
                            pfb_args.input_buffer, GPU_INPUT_BUF);

    /* If that also fails, exit */
    if (gpu_input_dbuf==NULL) {
        fprintf(stderr, "Error connecting to gpu_input_dbuf\n");
        exit(1);
    }

    guppi_databuf_clear(gpu_input_dbuf);

    /* Init second shared data buffer */
    struct guppi_databuf *cpu_input_dbuf=NULL;
    cpu_input_dbuf = guppi_databuf_attach(accum_args.input_buffer);

    /* If attach fails, first try to create the databuf */
    if (cpu_input_dbuf==NULL) 
        cpu_input_dbuf = guppi_databuf_create(24, 32*1024*1024,
                            accum_args.input_buffer, CPU_INPUT_BUF);

    /* If that also fails, exit */
    if (cpu_input_dbuf==NULL) {
        fprintf(stderr, "Error connecting to cpu_input_dbuf\n");
        exit(1);
    }

    guppi_databuf_clear(cpu_input_dbuf);

    /* Init third shared data buffer */
    struct guppi_databuf *disk_input_dbuf=NULL;
    disk_input_dbuf = guppi_databuf_attach(disk_args.input_buffer);

    /* If attach fails, first try to create the databuf */
    if (disk_input_dbuf==NULL) 
        disk_input_dbuf = guppi_databuf_create(16, 32*1024*1024,
                            disk_args.input_buffer, DISK_INPUT_BUF);

    /* If that also fails, exit */
    if (disk_input_dbuf==NULL) {
        fprintf(stderr, "Error connecting to disk_input_dbuf\n");
        exit(1);
    }

    guppi_databuf_clear(disk_input_dbuf);

    signal(SIGINT, cc);

    /* Launch net thread */
    pthread_t net_thread_id;
#ifdef FAKE_NET
    rv = pthread_create(&net_thread_id, NULL, guppi_fake_net_thread,
            (void *)&net_args);
#else
    rv = pthread_create(&net_thread_id, NULL, guppi_net_thread,
            (void *)&net_args);
#endif
    if (rv) { 
        fprintf(stderr, "Error creating net thread.\n");
        perror("pthread_create");
        exit(1);
    }

    /* Launch PFB thread */
    pthread_t pfb_thread_id;

    rv = pthread_create(&pfb_thread_id, NULL, vegas_pfb_thread, (void *)&pfb_args);

    if (rv) { 
        fprintf(stderr, "Error creating PFB thread.\n");
        perror("pthread_create");
        exit(1);
    }

    /* Launch accumulator thread */
    pthread_t accum_thread_id;

    rv = pthread_create(&accum_thread_id, NULL, guppi_accum_thread, (void *)&accum_args);

    if (rv) { 
        fprintf(stderr, "Error creating accumulator thread.\n");
        perror("pthread_create");
        exit(1);
    }

    /* Launch RAW_DISK thread, SDFITS disk thread, or PSRFITS disk thread */
    pthread_t disk_thread_id;
#ifdef RAW_DISK
    rv = pthread_create(&disk_thread_id, NULL, guppi_rawdisk_thread, 
        (void *)&disk_args);
#elif defined NULL_DISK
    rv = pthread_create(&disk_thread_id, NULL, guppi_null_thread, 
        (void *)&disk_args);
#elif defined EXT_DISK
    rv = 0;
#elif FITS_TYPE == PSRFITS
    rv = pthread_create(&disk_thread_id, NULL, guppi_psrfits_thread, 
        (void *)&disk_args);
#elif FITS_TYPE == SDFITS
    rv = pthread_create(&disk_thread_id, NULL, guppi_sdfits_thread, 
        (void *)&disk_args);
#endif
    if (rv) { 
        fprintf(stderr, "Error creating disk thread.\n");
        perror("pthread_create");
        exit(1);
    }

    /* Wait for end */
    run_threads=1;
    while (run_threads) { 
        sleep(1); 
        if (disk_args.finished) run_threads=0;
    }
 
    pthread_cancel(disk_thread_id);
    pthread_cancel(pfb_thread_id);
    pthread_cancel(accum_thread_id);
    pthread_cancel(net_thread_id);
    pthread_kill(disk_thread_id,SIGINT);
    pthread_kill(accum_thread_id,SIGINT);
    pthread_kill(pfb_thread_id,SIGINT);
    pthread_kill(net_thread_id,SIGINT);
    pthread_join(net_thread_id,NULL);
    printf("Joined net thread\n"); fflush(stdout);
    pthread_join(pfb_thread_id,NULL);
    printf("Joined PFB thread\n"); fflush(stdout);
    pthread_join(accum_thread_id,NULL);
    printf("Joined accumulator thread\n"); fflush(stdout);
    pthread_join(disk_thread_id,NULL);
    printf("Joined disk thread\n"); fflush(stdout);

    guppi_thread_args_destroy(&net_args);
    guppi_thread_args_destroy(&pfb_args);
    guppi_thread_args_destroy(&accum_args);
    guppi_thread_args_destroy(&disk_args);

    exit(0);
}
Example 19
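/*
 * Exercise the hFILE stream API: copy a source file through several
 * temporaries with hgetc/hputc, hread/hwrite, hputs, hpeek, and hseek,
 * checking offsets along the way and verifying every copy matches the
 * original.
 */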
int main(void)
{
    static const int size[] = { 1, 13, 403, 999, 30000 };

    char buffer[40000];
    char *original;
    int c, i;
    ssize_t n;
    off_t off;

    reopen("vcf.c", "test/hfile1.tmp");
    while ((c = hgetc(fin)) != EOF) {
        if (hputc(c, fout) == EOF) fail("hputc");
    }
    if (herrno(fin)) { errno = herrno(fin); fail("hgetc"); }

    reopen("test/hfile1.tmp", "test/hfile2.tmp");
    if (hpeek(fin, buffer, 50) < 0) fail("hpeek");
    while ((n = hread(fin, buffer, 17)) > 0) {
        if (hwrite(fout, buffer, n) != n) fail("hwrite");
    }
    if (n < 0) fail("hread");

    reopen("test/hfile2.tmp", "test/hfile3.tmp");
    while ((n = hread(fin, buffer, sizeof buffer)) > 0) {
        if (hwrite(fout, buffer, n) != n) fail("hwrite");
        if (hpeek(fin, buffer, 700) < 0) fail("hpeek");
    }
    if (n < 0) fail("hread");

    reopen("test/hfile3.tmp", "test/hfile4.tmp");
    i = 0;
    off = 0;
    while ((n = hread(fin, buffer, size[i++ % 5])) > 0) {
        off += n;
        buffer[n] = '\0';
        check_offset(fin, off, "pre-peek");
        if (hputs(buffer, fout) == EOF) fail("hputs");
        if ((n = hpeek(fin, buffer, size[(i+3) % 5])) < 0) fail("hpeek");
        check_offset(fin, off, "post-peek");
    }
    if (n < 0) fail("hread");

    reopen("test/hfile4.tmp", "test/hfile5.tmp");
    n = hread(fin, buffer, 200);
    if (n < 0) fail("hread");
    else if (n != 200) fail("hread only got %d", (int)n);
    if (hwrite(fout, buffer, 1000) != 1000) fail("hwrite");
    check_offset(fin, 200, "input/first200");
    check_offset(fout, 1000, "output/first200");

    if (hseek(fin, 800, SEEK_CUR) < 0) fail("hseek/cur");
    check_offset(fin, 1000, "input/seek");
    for (off = 1000; (n = hread(fin, buffer, sizeof buffer)) > 0; off += n)
        if (hwrite(fout, buffer, n) != n) fail("hwrite");
    if (n < 0) fail("hread");
    check_offset(fin, off, "input/eof");
    check_offset(fout, off, "output/eof");

    if (hseek(fin, 200, SEEK_SET) < 0) fail("hseek/set");
    if (hseek(fout, 200, SEEK_SET) < 0) fail("hseek(output)");
    check_offset(fin, 200, "input/backto200");
    check_offset(fout, 200, "output/backto200");
    n = hread(fin, buffer, 800);
    if (n < 0) fail("hread");
    else if (n != 800) fail("hread only got %d", (int)n);
    if (hwrite(fout, buffer, 800) != 800) fail("hwrite");
    check_offset(fin, 1000, "input/wrote800");
    check_offset(fout, 1000, "output/wrote800");

    if (hflush(fout) == EOF) fail("hflush");

    original = slurp("vcf.c");
    for (i = 1; i <= 5; i++) {
        char *text;
        sprintf(buffer, "test/hfile%d.tmp", i);
        text = slurp(buffer);
        if (strcmp(original, text) != 0) {
            fprintf(stderr, "%s differs from vcf.c\n", buffer);
            return EXIT_FAILURE;
        }
        free(text);
    }
    free(original);

    if (hclose(fin) != 0) fail("hclose(input)");
    if (hclose(fout) != 0) fail("hclose(output)");

    fout = hopen("test/hfile_chars.tmp", "w");
    if (fout == NULL) fail("hopen(\"test/hfile_chars.tmp\")");
    for (i = 0; i < 256; i++)
        if (hputc(i, fout) != i) fail("chars: hputc (%d)", i);
    if (hclose(fout) != 0) fail("hclose(test/hfile_chars.tmp)");

    fin = hopen("test/hfile_chars.tmp", "r");
    if (fin == NULL) fail("hopen(\"test/hfile_chars.tmp\") for reading");
    for (i = 0; i < 256; i++)
        if ((c = hgetc(fin)) != i)
            fail("chars: hgetc (%d = 0x%x) returned %d = 0x%x", i, i, c, c);
    if ((c = hgetc(fin)) != EOF) fail("chars: hgetc (EOF) returned %d", c);
    if (hclose(fin) != 0) fail("hclose(test/hfile_chars.tmp) for reading");

    fin = hopen("data:hello, world!\n", "r");
    if (fin == NULL) fail("hopen(\"data:...\")");
    n = hread(fin, buffer, 300);
    if (n < 0) fail("hread");
    buffer[n] = '\0';
    if (strcmp(buffer, "hello, world!\n") != 0) fail("hread result");
    if (hclose(fin) != 0) fail("hclose(\"data:...\")");

    return EXIT_SUCCESS;
}
Example 20
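/*
 * DAQ control server: create and poll a command FIFO, keep the DAQPULSE and
 * DAQSTATE heartbeat fields updated in status memory, and start or stop the
 * HBW/LBW/monitor processing threads in response to START, MONITOR, STOP,
 * and QUIT commands.
 */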
int main(int argc, char *argv[]) {

    static struct option long_opts[] = {
        {"help",   0, NULL, 'h'},
        {0,0,0,0}
    };
    int opt, opti;
    while ((opt=getopt_long(argc,argv,"h",long_opts,&opti))!=-1) {
        switch (opt) {
        default:
        case 'h':
            usage();
            exit(0);
            break;
        }
    }

    /* Create FIFO */
    int rv = mkfifo(vegas_DAQ_CONTROL, 0666);
    if (rv!=0 && errno!=EEXIST) {
        fprintf(stderr, "vegas_daq_server: Error creating control fifo\n");
        perror("mkfifo");
        exit(1);
    }

    /* Open command FIFO for read */
#define MAX_CMD_LEN 1024
    char cmd[MAX_CMD_LEN];
    int command_fifo;
    command_fifo = open(vegas_DAQ_CONTROL, O_RDONLY | O_NONBLOCK);
    if (command_fifo<0) {
        fprintf(stderr, "vegas_daq_server: Error opening control fifo\n");
        perror("open");
        exit(1);
    }

    /* Attach to shared memory buffers */
    struct vegas_status stat;
    struct vegas_databuf *dbuf_net=NULL, *dbuf_pfb=NULL, *dbuf_acc=NULL;
    rv = vegas_status_attach(&stat);
    const int netbuf_id = 1;
    const int pfbbuf_id = 2;
    const int accbuf_id = 3;
    if (rv!=VEGAS_OK) {
        fprintf(stderr, "Error connecting to vegas_status\n");
        exit(1);
    }
    dbuf_net = vegas_databuf_attach(netbuf_id);
    if (dbuf_net==NULL) {
        fprintf(stderr, "Error connecting to vegas_databuf (raw net)\n");
        exit(1);
    }
    vegas_databuf_clear(dbuf_net);
    dbuf_pfb = vegas_databuf_attach(pfbbuf_id);
    if (dbuf_pfb==NULL) {
        fprintf(stderr, "Error connecting to vegas_databuf (accum input)\n");
        exit(1);
    }
    vegas_databuf_clear(dbuf_pfb);

    dbuf_acc = vegas_databuf_attach(accbuf_id);
    if (dbuf_acc==NULL) {
        fprintf(stderr, "Error connecting to vegas_databuf (accum output)\n");
        exit(1);
    }
    vegas_databuf_clear(dbuf_acc);

    /* Thread setup */
#define MAX_THREAD 8
    int i;
    int nthread_cur = 0;
    struct vegas_thread_args args[MAX_THREAD];
    pthread_t thread_id[MAX_THREAD];
    for (i=0; i<MAX_THREAD; i++) thread_id[i] = 0;

    /* Print start time for logs */
    time_t curtime = time(NULL);
    char tmp[256];
    printf("\nvegas_daq_server started at %s", ctime_r(&curtime,tmp));
    fflush(stdout);

    /* hmm.. keep this old signal stuff?? */
    run=1;
    srv_run=1;
    signal(SIGINT, srv_cc);
    signal(SIGTERM, srv_quit);

    /* Loop over recv'd commands, process them */
    int cmd_wait=1;
    while (cmd_wait && srv_run) {

        // Check to see if threads have exited, if so, stop them
        if (check_thread_exit(args, nthread_cur)) {
            run = 0;
            stop_threads(args, thread_id, nthread_cur);
            nthread_cur = 0;
        }

        // Heartbeat, status update
        time_t curtime;
        char timestr[32];
        char *ctmp;
        time(&curtime);
        ctime_r(&curtime, timestr);
        ctmp = strchr(timestr, '\n');
        if (ctmp!=NULL) {
            *ctmp = '\0';
        }
        else {
            timestr[0]='\0';
        }
        vegas_status_lock(&stat);
        hputs(stat.buf, "DAQPULSE", timestr);
        hputs(stat.buf, "DAQSTATE", nthread_cur==0 ? "stopped" : "running");
        vegas_status_unlock(&stat);

        // Flush any status/error/etc for logfiles
        fflush(stdout);
        fflush(stderr);

        // Wait for data on fifo
        struct pollfd pfd;
        pfd.fd = command_fifo;
        pfd.events = POLLIN;
        rv = poll(&pfd, 1, 1000);
        if (rv==0) {
            continue;
        }
        else if (rv<0) {
            if (errno!=EINTR) perror("poll");
            continue;
        }

        // If we got POLLHUP, it means the other side closed its
        // connection.  Close and reopen the FIFO to clear this
        // condition.  Is there a better/recommended way to do this?
        if (pfd.revents==POLLHUP) {
            close(command_fifo);
            command_fifo = open(vegas_DAQ_CONTROL, O_RDONLY | O_NONBLOCK);
            if (command_fifo<0) {
                fprintf(stderr,
                        "vegas_daq_server: Error opening control fifo\n");
                perror("open");
                break;
            }
            continue;
        }

        // Read the command
        memset(cmd, 0, MAX_CMD_LEN);
        rv = read(command_fifo, cmd, MAX_CMD_LEN-1);
        if (rv==0) {
            continue;
        }
        else if (rv<0) {
            if (errno==EAGAIN) {
                continue;
            }
            else {
                perror("read");
                continue;
            }
        }

        // Truncate at newline
        // TODO: allow multiple commands in one read?
        char *ptr = strchr(cmd, '\n');
        if (ptr!=NULL) *ptr='\0';

        // Process the command
        if (strncasecmp(cmd,"QUIT",MAX_CMD_LEN)==0) {
            // Exit program
            printf("Exit\n");
            run = 0;
            stop_threads(args, thread_id, nthread_cur);
            cmd_wait=0;
            continue;
        }

        else if (strncasecmp(cmd,"START",MAX_CMD_LEN)==0 ||
                 strncasecmp(cmd,"MONITOR",MAX_CMD_LEN)==0) {
            // Start observations
            // TODO : decide how to behave if observations are running
            printf("Start observations\n");

            if (nthread_cur>0) {
                printf("  observations already running!\n");
            } else {

                // Figure out which mode to start
                char obs_mode[32];
                if (strncasecmp(cmd,"START",MAX_CMD_LEN)==0) {
                    vegas_status_lock(&stat);
                    vegas_read_obs_mode(stat.buf, obs_mode);
                    vegas_status_unlock(&stat);
                } else {
                    strncpy(obs_mode, cmd, 32);
                }
                printf("  obs_mode = %s\n", obs_mode);

                // Clear out data bufs
                vegas_databuf_clear(dbuf_net);
                vegas_databuf_clear(dbuf_pfb);
                vegas_databuf_clear(dbuf_acc);


                // Do it
                run = 1;
                if (strncasecmp(obs_mode, "HBW", 4)==0) {
                    hputs(stat.buf, "BW_MODE", "high");
                    hputs(stat.buf, "SWVER", "1.4");
                    init_hbw_mode(args, &nthread_cur);
                    start_hbw_mode(args, thread_id);
                } else if (strncasecmp(obs_mode, "LBW", 4)==0) {
                    hputs(stat.buf, "BW_MODE", "low");
                    hputs(stat.buf, "SWVER", "1.4");

                    init_lbw_mode(args, &nthread_cur);
                    start_lbw_mode(args, thread_id);
                } else if (strncasecmp(obs_mode, "MONITOR", 8)==0) {
                    init_monitor_mode(args, &nthread_cur);
                    start_monitor_mode(args, thread_id);
                } else {
                    printf("  unrecognized obs_mode!\n");
                }

            }

        }

        else if (strncasecmp(cmd,"STOP",MAX_CMD_LEN)==0) {
            // Stop observations
            printf("Stop observations\n");
            run = 0;
            stop_threads(args, thread_id, nthread_cur);
            nthread_cur = 0;
        }

        else {
            // Unknown command
            printf("Unrecognized command '%s'\n", cmd);
        }
    }

    /* Stop any running threads */
    run = 0;
    stop_threads(args, thread_id, nthread_cur);

    if (command_fifo>0) close(command_fifo);

    vegas_status_lock(&stat);
    hputs(stat.buf, "DAQSTATE", "exiting");
    vegas_status_unlock(&stat);

    curtime = time(NULL);
    printf("vegas_daq_server exiting cleanly at %s\n", ctime_r(&curtime,tmp));

    fflush(stdout);
    fflush(stderr);

    /* TODO: remove FIFO */

    exit(0);
}
Example 21
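/*
 * Disk thread: wait for filled data blocks, read the observation and subint
 * parameters, and write each block out as PSRFITS subints, handling Stokes-I
 * selection, frequency/time downsampling, and polyco output in fold mode.
 */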
void guppi_psrfits_thread(void *_args) {
    
    /* Get args */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;
    pthread_cleanup_push((void *)guppi_thread_set_finished, args);
    
    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    CPU_ZERO(&cpuset);
    CPU_SET(1, &cpuset);
    int rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) { 
        guppi_error("guppi_psrfits_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        guppi_error("guppi_psrfits_thread", "Error setting priority level.");
        perror("set_priority");
    }
    
    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_psrfits_thread", 
                    "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);
    
    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);
    
    /* Initialize some key parameters */
    struct guppi_params gp;
    struct psrfits pf;
    pf.sub.data = NULL;
    pf.sub.dat_freqs = pf.sub.dat_weights = NULL;
    pf.sub.dat_offsets = pf.sub.dat_scales = NULL;
    pf.hdr.chan_dm = 0.0;
    pf.filenum = 0; // This is crucial
    pthread_cleanup_push((void *)guppi_free_psrfits, &pf);
    pthread_cleanup_push((void *)psrfits_close, &pf);
    //pf.multifile = 0;  // Use a single file for fold mode
    pf.multifile = 1;  // Use a multiple files for fold mode
    pf.quiet = 0;      // Print a message per each subint written
    
    /* Attach to databuf shared mem */
    struct guppi_databuf *db;
    db = guppi_databuf_attach(args->input_buffer);
    if (db==NULL) {
        guppi_error("guppi_psrfits_thread",
                    "Error attaching to databuf shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db);
    
    /* Loop */
    int curblock=0, total_status=0, firsttime=1, run=1, got_packet_0=0;
    int mode=SEARCH_MODE;
    char *ptr;
    char tmpstr[256];
    struct foldbuf fb;
    struct polyco pc[64];  
    memset(pc, 0, sizeof(pc));
    int n_polyco_written=0;
    float *fold_output_array = NULL;
    int scan_finished=0;
    signal(SIGINT, cc);
    do {
        /* Note waiting status */
        guppi_status_lock_safe(&st);
        if (got_packet_0)
            sprintf(tmpstr, "waiting(%d)", curblock);
        else
            sprintf(tmpstr, "ready");
        hputs(st.buf, STATUS_KEY, tmpstr);
        guppi_status_unlock_safe(&st);
        
        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db, curblock);
        if (rv!=0) {
            // This is a big ol' kludge to avoid this process hanging
            // due to thread synchronization problems.
            sleep(1);
            continue; 
        }

        /* Note current block */
        guppi_status_lock_safe(&st);
        hputi4(st.buf, "CURBLOCK", curblock);
        guppi_status_unlock_safe(&st);

        /* See how full databuf is */
        total_status = guppi_databuf_total_status(db);
        
        /* Read param structs for this block */
        ptr = guppi_databuf_header(db, curblock);
        if (firsttime) {
            guppi_read_obs_params(ptr, &gp, &pf);
            firsttime = 0;
        } else {
            guppi_read_subint_params(ptr, &gp, &pf);
        }

        /* Find out what mode this data is in */
        mode = psrfits_obs_mode(pf.hdr.obs_mode);

        /* Check if we got both packet 0 and a valid observation
         * start time.  If so, flag writing to start.
         */
        if (got_packet_0==0 && gp.packetindex==0 && gp.stt_valid==1) {
            got_packet_0 = 1;
            guppi_read_obs_params(ptr, &gp, &pf);
            guppi_update_ds_params(&pf);
            memset(pc, 0, sizeof(pc));
            n_polyco_written=0;
        }

        /* If actual observation has started, write the data */
        if (got_packet_0) { 

            /* Note waiting status */
            guppi_status_lock_safe(&st);
            hputs(st.buf, STATUS_KEY, "writing");
            guppi_status_unlock_safe(&st);
            
            /* Get the pointer to the current data */
            if (mode==FOLD_MODE) {
                fb.nchan = pf.hdr.nchan;
                fb.npol = pf.hdr.npol;
                fb.nbin = pf.hdr.nbin;
                fb.data = (float *)guppi_databuf_data(db, curblock);
                fb.count = (unsigned *)(guppi_databuf_data(db, curblock)
                        + foldbuf_data_size(&fb));
                fold_output_array = (float *)realloc(fold_output_array,
                        sizeof(float) * pf.hdr.nbin * pf.hdr.nchan * 
                        pf.hdr.npol);
                pf.sub.data = (unsigned char *)fold_output_array;
                pf.fold.pc = (struct polyco *)(guppi_databuf_data(db,curblock)
                        + foldbuf_data_size(&fb) + foldbuf_count_size(&fb));
            } else 
                pf.sub.data = (unsigned char *)guppi_databuf_data(db, curblock);
            
            /* Set the DC and Nyquist channels explicitly to zero */
            /* because of the "FFT Problem" that splits DC power  */
            /* into those two bins.                               */
            zero_end_chans(&pf);

            /* Output only Stokes I (in place) */
            if (pf.hdr.onlyI && pf.hdr.npol==4)
                get_stokes_I(&pf);

            /* Downsample in frequency (in place) */
            if (pf.hdr.ds_freq_fact > 1)
                downsample_freq(&pf);

            /* Downsample in time (in place) */
            if (pf.hdr.ds_time_fact > 1)
                downsample_time(&pf);

            /* Folded data needs a transpose */
            if (mode==FOLD_MODE)
                normalize_transpose_folds(fold_output_array, &fb);

            /* Write the data */
            int last_filenum = pf.filenum;
            psrfits_write_subint(&pf);

            /* Any actions that need to be taken when a new file
             * is created.
             */
            if (pf.filenum!=last_filenum) {
                /* No polycos yet written to the new file */
                n_polyco_written=0;
            }

            /* Write the polycos if needed */
            int write_pc=0, i, j;
            for (i=0; i<pf.fold.n_polyco_sets; i++) {
                if (pf.fold.pc[i].used==0) continue; 
                int new_pc=1;
                for (j=0; j<n_polyco_written; j++) {
                    if (polycos_differ(&pf.fold.pc[i], &pc[j])==0) {
                        new_pc=0;
                        break;
                    }
                }
                if (new_pc || n_polyco_written==0) {
                    pc[n_polyco_written] = pf.fold.pc[i];
                    n_polyco_written++;
                    write_pc=1;
                } else {
                    pf.fold.pc[i].used = 0; // Already have this one
                }
            }
            if (write_pc) 
                psrfits_write_polycos(&pf, pf.fold.pc, pf.fold.n_polyco_sets);

            /* Is the scan complete? */
            if ((pf.hdr.scanlen > 0.0) && 
                (pf.T > pf.hdr.scanlen)) scan_finished = 1;
            
            /* For debugging... */
            if (gp.drop_frac > 0.0) {
               printf("Block %d dropped %.3g%% of the packets\n", 
                      pf.tot_rows, gp.drop_frac*100.0);
            }

        }

        /* Mark as free */
        guppi_databuf_set_free(db, curblock);
        
        /* Go to next block */
        curblock = (curblock + 1) % db->n_block;
        
        /* Check for cancel */
        pthread_testcancel();
        
    } while (run && !scan_finished);
    
    /* Cleanup */
    
    if (fold_output_array!=NULL) free(fold_output_array);

    pthread_exit(NULL);
    
    pthread_cleanup_pop(0); /* Closes psrfits_close */
    pthread_cleanup_pop(0); /* Closes guppi_free_psrfits */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes set_finished */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach */
}
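The zero_end_chans() call above addresses the "FFT Problem" noted in the comment: DC power leaks into the first (DC) and last (Nyquist) frequency bins, so both are forced to zero before the subint is written. A minimal standalone sketch of that idea, assuming a flat 8-bit buffer ordered [spectrum][pol][chan]; the helper name and the data layout are illustrative, not the actual psrfits structures.

#include <stddef.h>

/* Illustrative sketch: zero the DC (chan 0) and Nyquist (chan nchan-1)
 * bins of every spectrum and polarization in an 8-bit data block. */
void zero_end_chans_sketch(unsigned char *data,
                           int nspec, int npol, int nchan)
{
    int s, p;
    for (s = 0; s < nspec; s++) {
        for (p = 0; p < npol; p++) {
            unsigned char *row = data + ((size_t)s * npol + p) * nchan;
            row[0] = 0;          /* DC bin */
            row[nchan - 1] = 0;  /* Nyquist bin */
        }
    }
}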
Example n. 22
0
uval
reload_image(const char* name, uval ofd)
{
	struct load_file_entry lf;
	uval start = 0;
	uval size = 0;
	uval rc = 0;
	int elftype;
	struct partition_status _ps;
	struct partition_status *ps=&_ps;
	int i;

	ps->lpid = H_SELF_LPID;
	ps->init_mem = 0;
	ps->init_mem_size = CHUNK_SIZE;
	ps->name = name;

	while (image_names[rc] != NULL && size == 0) {
		if (strcmp(image_names[rc], name) == 0) {
			start = (uval)image_start[rc];
			size = (uval)image_size[rc];
#ifdef DEBUG
			hprintf("%s: Found image named: %s "
				"@ 0x%lx, size 0x%lx.\n",
				__func__, name, start, size);
#endif
		}
		++rc;
	}

	if (size == 0) {
		hprintf("%s: could not find image named: %s.\n",
			__func__, name);
		return H_Parameter;
	}

#ifdef USE_LIBBZ2
	char *dcomp;
	uval dcomp_size;
	uval dcomp_buf_size;
	sval ret;
	ret = dcomp_img((char*)start, size,
			&dcomp, &dcomp_buf_size, &dcomp_size, &pa);

	if (ret >= 0) {
		hprintf("Decompressed image: %ld -> %ld\n", size, dcomp_size);
		start = (uval)dcomp;
		size = dcomp_size;
	} else {
		hprintf("Decompression failure %ld\n", ret);
	}
	/* else try to use as is, could be uncompressed */

#endif

	elftype = tryElf(&lf, start);
	if (!(elftype == KK_64 || elftype == KK_32)) {
		hputs("WARNING: Image is not elf, assuming it is a binary\n");
		lf.numberOfLoaderSegments = 1;
		lf.loaderSegmentTable[0].file_start = start;
		lf.loaderSegmentTable[0].file_size = size;
		lf.va_entry = 0;
		lf.va_tocPtr = 0;
	}

	uval load_offset = 0;
	struct loader_segment_entry *le;
	int segno = lf.numberOfLoaderSegments;
	uval begin = CHUNK_SIZE;
	uval end = 0;
	for (i = 0; i < segno; i++) {
		le = &(lf.loaderSegmentTable[i]);

		if (le->va_start < begin) {
			begin = le->va_start;
		}

		if (le->va_start + le->file_size > end) {
			end = le->va_start + le->file_size;
		}
	}

	load_offset = (uval) get_pages_aligned(&pa, end - begin, 20);
	load_offset -= begin;

	load_image(ps, &lf, load_offset);

	uval magic = lf.loaderSegmentTable[0].file_start;
	magic += 0x10;

	if (*(uval64*)magic == HYPE_PARTITION_INFO_MAGIC_NUMBER) {
		magic = *((uval*)(magic + 8));
	} else {
		magic = INVALID_LOGICAL_ADDRESS;
	}

	if (magic >= begin && magic < end) {
		load_in_lpar(ps, magic + load_offset,
			     (uval)pinfo, sizeof(pinfo));
	} else {
		hprintf("Don't know how to set up partition info "
			"not in OS image\n");
	}

	restart_partition(ps, &lf, ofd);
	assert(0, "reload_partition failed\n");

#ifdef USE_LIBBZ2
	/* Free de-compression buffer, is always allocated by dcomp_img */
	free_pages(&pa, (uval)dcomp, dcomp_buf_size);
#endif

	return H_Parameter;
}
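Both reload_image() above and launch_image() later scan the parallel image_names/image_start/image_size tables by name. A minimal sketch of that lookup pulled out into a helper; the table contents and the find_image() name are made up for illustration.

#include <string.h>

/* Illustrative image table in the style used above (NULL-terminated names). */
static const char *image_names[] = { "linux", "test_os", NULL };
static const unsigned long image_start[] = { 0x1000000, 0x4000000 };
static const unsigned long image_size[]  = { 0x200000, 0x80000 };

/* Return the table index of the named image and report its start/size,
 * or -1 if the name is not present. */
int find_image(const char *name, unsigned long *start, unsigned long *size)
{
    int i;
    for (i = 0; image_names[i] != NULL; i++) {
        if (strcmp(image_names[i], name) == 0) {
            *start = image_start[i];
            *size = image_size[i];
            return i;
        }
    }
    return -1;
}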
Example n. 23
0
static void *run(hashpipe_thread_args_t * args, int doCPU)
{
    // Local aliases to shorten access to args fields
    paper_gpu_input_databuf_t *db_in = (paper_gpu_input_databuf_t *)args->ibuf;
    paper_output_databuf_t *db_out = (paper_output_databuf_t *)args->obuf;
    hashpipe_status_t st = args->st;
    const char * status_key = args->thread_desc->skey;

#ifdef DEBUG_SEMS
    fprintf(stderr, "s/tid %lu/                      GPU/\n", pthread_self());
#endif

    // Init integration control status variables
    int gpu_dev = 0;
    hashpipe_status_lock_safe(&st);
    hputs(st.buf,  "INTSTAT", "off");
    hputi8(st.buf, "INTSYNC", 0);
    hputi4(st.buf, "INTCOUNT", N_SUB_BLOCKS_PER_INPUT_BLOCK);
    hputi8(st.buf, "GPUDUMPS", 0);
    hgeti4(st.buf, "GPUDEV", &gpu_dev); // No change if not found
    hputi4(st.buf, "GPUDEV", gpu_dev);
    hashpipe_status_unlock_safe(&st);

    /* Loop */
    int rv;
    char integ_status[17];
    uint64_t start_mcount, last_mcount=0;
    uint64_t gpu_dumps=0;
    int int_count; // Number of blocks to integrate per dump
    int xgpu_error = 0;
    int curblock_in=0;
    int curblock_out=0;

    struct timespec start, stop;
    uint64_t elapsed_gpu_ns  = 0;
    uint64_t gpu_block_count = 0;

    // Initialize context to point at first input and output memory blocks.
    // This seems redundant since we do this just before calling
    // xgpuCudaXengine, but we need to pass something in for array_h and
    // matrix_x to prevent xgpuInit from allocating memory.
    XGPUContext context;
    context.array_h = (ComplexInput *)db_in->block[0].data;
    context.array_len = (db_in->header.n_block * sizeof(paper_gpu_input_block_t) - sizeof(paper_input_header_t)) / sizeof(ComplexInput);
    context.matrix_h = (Complex *)db_out->block[0].data;
    context.matrix_len = (db_out->header.n_block * sizeof(paper_output_block_t) - sizeof(paper_output_header_t)) / sizeof(Complex);

    xgpu_error = xgpuInit(&context, gpu_dev);
    if (XGPU_OK != xgpu_error) {
        fprintf(stderr, "ERROR: xGPU initialization failed (error code %d)\n", xgpu_error);
        return THREAD_ERROR;
    }

    while (run_threads()) {

        // Note waiting status,
        // query integrating status
        // and, if armed, start count
        hashpipe_status_lock_safe(&st);
        hputs(st.buf, status_key, "waiting");
        hgets(st.buf,  "INTSTAT", 16, integ_status);
        hgeti8(st.buf, "INTSYNC", (long long*)&start_mcount);
        hashpipe_status_unlock_safe(&st);

        // Wait for new input block to be filled
        while ((rv=hashpipe_databuf_wait_filled((hashpipe_databuf_t *)db_in, curblock_in)) != HASHPIPE_OK) {
            if (rv==HASHPIPE_TIMEOUT) {
                hashpipe_status_lock_safe(&st);
                hputs(st.buf, status_key, "blocked_in");
                hashpipe_status_unlock_safe(&st);
                continue;
            } else {
                hashpipe_error(__FUNCTION__, "error waiting for filled databuf");
                pthread_exit(NULL);
                break;
            }
        }

        // Got a new data block, update status and determine how to handle it
        hashpipe_status_lock_safe(&st);
        hputi4(st.buf, "GPUBLKIN", curblock_in);
        hputu8(st.buf, "GPUMCNT", db_in->block[curblock_in].header.mcnt);
        hashpipe_status_unlock_safe(&st);

        // If integration status "off"
        if(!strcmp(integ_status, "off")) {
            // Mark input block as free and advance
            hashpipe_databuf_set_free((hashpipe_databuf_t *)db_in, curblock_in);
            curblock_in = (curblock_in + 1) % db_in->header.n_block;
            // Skip to next input buffer
            continue;
        }

        // If integration status is "start"
        if(!strcmp(integ_status, "start")) {
            // If buffer mcount < start_mcount (i.e. not there yet)
            if(db_in->block[curblock_in].header.mcnt < start_mcount) {
              // Drop input buffer
              // Mark input block as free and advance
              hashpipe_databuf_set_free((hashpipe_databuf_t *)db_in, curblock_in);
              curblock_in = (curblock_in + 1) % db_in->header.n_block;
              // Skip to next input buffer
              continue;
            // Else if mcount == start_mcount (time to start)
            } else if(db_in->block[curblock_in].header.mcnt == start_mcount) {
              // Set integration status to "on"
              // Read integration count (INTCOUNT)
              fprintf(stderr, "--- integration on ---\n");
              strcpy(integ_status, "on");
              hashpipe_status_lock_safe(&st);
              hputs(st.buf,  "INTSTAT", integ_status);
              hgeti4(st.buf, "INTCOUNT", &int_count);
              hashpipe_status_unlock_safe(&st);
              // Compute last mcount
              last_mcount = start_mcount + (int_count-1) * N_SUB_BLOCKS_PER_INPUT_BLOCK;
            // Else (missed starting mcount)
            } else {
              // Handle missed start of integration
              // TODO!
              fprintf(stderr, "--- mcnt=%06lx > start_mcnt=%06lx ---\n",
                  db_in->block[curblock_in].header.mcnt, start_mcount);
            }
        }

        // Integration status is "on" or "stop"

        // Note processing status
        hashpipe_status_lock_safe(&st);
        hputs(st.buf, status_key, "processing gpu");
        hashpipe_status_unlock_safe(&st);


        // Setup for current chunk
        context.input_offset = curblock_in * sizeof(paper_gpu_input_block_t) / sizeof(ComplexInput);
        context.output_offset = curblock_out * sizeof(paper_output_block_t) / sizeof(Complex);

        // Call CUDA X engine function
        int doDump = 0;
        // Dump if this is the last block or we are doing both CPU and GPU
        // (GPU and CPU test mode always dumps every input block)
        if(db_in->block[curblock_in].header.mcnt >= last_mcount || doCPU) {
          doDump = 1;

          // Check whether we missed the end of integration.  If we get a block
          // whose mcnt is greater than last_mcount, then for some reason (e.g.
          // networking problems) we didn't see a block whose mcnt was
          // last_mcount.  This should "never" happen, but it has been seen to
          // occur when the 10 GbE links have many errors.
          if(db_in->block[curblock_in].header.mcnt > last_mcount) {
            // Can't do much error recovery, so just log it.
            fprintf(stderr, "--- mcnt=%06lx > last_mcnt=%06lx ---\n",
                db_in->block[curblock_in].header.mcnt, last_mcount);
          }

          // Wait for new output block to be free
          while ((rv=paper_output_databuf_wait_free(db_out, curblock_out)) != HASHPIPE_OK) {
              if (rv==HASHPIPE_TIMEOUT) {
                  hashpipe_status_lock_safe(&st);
                  hputs(st.buf, status_key, "blocked gpu out");
                  hashpipe_status_unlock_safe(&st);
                  continue;
              } else {
                  hashpipe_error(__FUNCTION__, "error waiting for free databuf");
                  pthread_exit(NULL);
                  break;
              }
          }
        }

        clock_gettime(CLOCK_MONOTONIC, &start);

        xgpuCudaXengine(&context, doDump ? SYNCOP_DUMP : SYNCOP_SYNC_TRANSFER);

        clock_gettime(CLOCK_MONOTONIC, &stop);
        elapsed_gpu_ns += ELAPSED_NS(start, stop);
        gpu_block_count++;

        if(doDump) {
          clock_gettime(CLOCK_MONOTONIC, &start);
          xgpuClearDeviceIntegrationBuffer(&context);
          clock_gettime(CLOCK_MONOTONIC, &stop);
          elapsed_gpu_ns += ELAPSED_NS(start, stop);

          // TODO Maybe need to subtract all or half the integration time here
          // depending on the receiver's expectations.
          db_out->block[curblock_out].header.mcnt = last_mcount;
          // If integration status is "stop"
          if(!strcmp(integ_status, "stop")) {
            // Set integration status to "off"
            strcpy(integ_status, "off");
            hashpipe_status_lock_safe(&st);
            hputs(st.buf,  "INTSTAT", integ_status);
            hashpipe_status_unlock_safe(&st);
          } else {
            // Advance last_mcount for end of next integration
            last_mcount += int_count * N_SUB_BLOCKS_PER_INPUT_BLOCK;
          }

          // Mark output block as full and advance
          paper_output_databuf_set_filled(db_out, curblock_out);
          curblock_out = (curblock_out + 1) % db_out->header.n_block;
          // TODO Need to handle or at least check for overflow!

          // Update GPU dump counter and GPU Gbps
          gpu_dumps++;
          hashpipe_status_lock_safe(&st);
          hputi8(st.buf, "GPUDUMPS", gpu_dumps);
          hputr4(st.buf, "GPUGBPS", (float)(8*N_FLUFFED_BYTES_PER_BLOCK*gpu_block_count)/elapsed_gpu_ns);
          hashpipe_status_unlock_safe(&st);

          // Start new average
          elapsed_gpu_ns  = 0;
          gpu_block_count = 0;
        }

        if(doCPU) {

            /* Note waiting status */
            hashpipe_status_lock_safe(&st);
            hputs(st.buf, status_key, "waiting");
            hashpipe_status_unlock_safe(&st);

            // Wait for new output block to be free
            while ((rv=paper_output_databuf_wait_free(db_out, curblock_out)) != HASHPIPE_OK) {
                if (rv==HASHPIPE_TIMEOUT) {
                    hashpipe_status_lock_safe(&st);
                    hputs(st.buf, status_key, "blocked cpu out");
                    hashpipe_status_unlock_safe(&st);
                    continue;
                } else {
                    hashpipe_error(__FUNCTION__, "error waiting for free databuf");
                    pthread_exit(NULL);
                    break;
                }
            }

            // Note "processing cpu" status, current input block
            hashpipe_status_lock_safe(&st);
            hputs(st.buf, status_key, "processing cpu");
            hashpipe_status_unlock_safe(&st);

            /*
             * Call CPU X engine function
             */
            xgpuOmpXengine((Complex *)db_out->block[curblock_out].data, context.array_h);

            // Mark output block as full and advance
            paper_output_databuf_set_filled(db_out, curblock_out);
            curblock_out = (curblock_out + 1) % db_out->header.n_block;
            // TODO Need to handle or at least check for overflow!
        }

        // Mark input block as free and advance
        hashpipe_databuf_set_free((hashpipe_databuf_t *)db_in, curblock_in);
        curblock_in = (curblock_in + 1) % db_in->header.n_block;

        /* Check for cancel */
        pthread_testcancel();
    }

    xgpuFree(&context);

    // Thread success!
    return NULL;
}
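The integration bookkeeping in the loop above is driven entirely by mcnt arithmetic: the first dump lands at start_mcount plus (INTCOUNT-1) input blocks, and each later dump advances the target by INTCOUNT blocks. A tiny standalone model of that arithmetic, with N_SUB standing in for N_SUB_BLOCKS_PER_INPUT_BLOCK and the numbers chosen arbitrarily.

#include <stdint.h>
#include <stdio.h>

#define N_SUB 4  /* stand-in for N_SUB_BLOCKS_PER_INPUT_BLOCK */

int main(void)
{
    uint64_t start_mcount = 100;  /* as read from INTSYNC */
    int int_count = 8;            /* as read from INTCOUNT */

    /* mcnt of the last input block of the first integration */
    uint64_t last_mcount = start_mcount + (uint64_t)(int_count - 1) * N_SUB;
    printf("first dump at mcnt %llu\n", (unsigned long long)last_mcount);

    /* after each dump, the end of the next integration moves by int_count blocks */
    last_mcount += (uint64_t)int_count * N_SUB;
    printf("next dump at mcnt %llu\n", (unsigned long long)last_mcount);
    return 0;
}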
Example n. 24
0
uval
launch_image(uval init_mem, uval init_mem_size, const char* name, uval ofd)
{
	struct load_file_entry lf;
	uval start = 0;
	uval size = 0;
	uval rc = 0;
	int elftype;
	struct partition_status *ps;
	int i;

	for (i = 0; i < MAX_MANAGED_PARTITIONS; ++i) {
		if (partitions[i].active == 0) {
			partitions[i].active = 1;
			partitions[i].lpid = 0;
			partitions[i].init_mem = init_mem;
			partitions[i].init_mem_size = init_mem_size;
			partitions[i].name = name;
			break;
		}
	}
	/* No break above: every slot was already in use */
	if (i == MAX_MANAGED_PARTITIONS) {
		hprintf("unable to find slot to track partition\n");
		return 0;
	}

	ps = &partitions[i];

	while (image_names[rc] != NULL && size == 0) {
		if (strcmp(image_names[rc], name) == 0) {
			start = (uval)image_start[rc];
			size = (uval)image_size[rc];
#ifdef DEBUG
			hprintf("%s: Found image named: %s "
				"@ 0x%lx, size 0x%lx.\n",
				__func__, name, start, size);
#endif
		}
		++rc;
	}

	if (size == 0) {
		hprintf("%s: could not find image named: %s.\n",
			__func__, name);
		return H_Parameter;
	}

#ifdef USE_LIBBZ2
	char *dcomp;
	uval dcomp_size;
	uval dcomp_buf_size;
	sval ret;
	ret = dcomp_img((char*)start, size,
			&dcomp, &dcomp_buf_size, &dcomp_size, &pa);

	if (ret >= 0) {
		hprintf("Decompressed image: %ld -> %ld\n", size, dcomp_size);
		start = (uval)dcomp;
		size = dcomp_size;
	} else {
		hprintf("Decompression failure %ld\n", ret);
	}
	/* else try to use as is, could be uncompressed */

#endif

	elftype = tryElf(&lf, start);
	if (!(elftype == KK_64 || elftype == KK_32)) {
		hputs("WARNING: Image is not elf, assuming it is a binary\n");
		lf.numberOfLoaderSegments = 1;
		lf.loaderSegmentTable[0].file_start = start;
		lf.loaderSegmentTable[0].file_size = size;
		lf.va_entry = 0;
		lf.va_tocPtr = 0;
	}


	load_image(ps, &lf, 0);

	uval id = launch_partition(ps, &lf, ofd);

#ifdef USE_LIBBZ2
	/* Free de-compression buffer, is always allocated by dcomp_img */
	free_pages(&pa, (uval)dcomp, dcomp_buf_size);
#endif

	updatePartInfo(IPC_LPAR_RUNNING, ps->lpid);


	return id;
}
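The partition table bookkeeping above relies on a common C idiom: scan for a free slot, break as soon as one is claimed, and treat i == MAX after the loop as "nothing free". A standalone sketch of that pattern with a made-up size and struct in place of MAX_MANAGED_PARTITIONS and partition_status.

#include <stdio.h>

#define MAX_SLOTS 8  /* illustrative size, not MAX_MANAGED_PARTITIONS */

struct slot { int active; };

int main(void)
{
    struct slot slots[MAX_SLOTS] = { { 1 }, { 1 } };  /* first two in use */
    int i;

    for (i = 0; i < MAX_SLOTS; i++) {
        if (slots[i].active == 0) {
            slots[i].active = 1;  /* claim the free slot */
            break;
        }
    }
    if (i == MAX_SLOTS) {  /* loop ran off the end: no free slot */
        fprintf(stderr, "unable to find a free slot\n");
        return 1;
    }
    printf("claimed slot %d\n", i);
    return 0;
}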
Example n. 25
0
void guppi_null_thread(void *_args) {

    int rv;
    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    CPU_ZERO(&cpuset);
    CPU_SET(6, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) { 
        guppi_error("guppi_null_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, 0);
    if (rv<0) {
        guppi_error("guppi_null_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Get args */
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;

    /* Attach to status shared mem area */
    struct guppi_status st;
    rv = guppi_status_attach(&st);
    if (rv!=GUPPI_OK) {
        guppi_error("guppi_null_thread", 
                "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);

    /* Init status */
    guppi_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    guppi_status_unlock_safe(&st);

    /* Attach to databuf shared mem */
    struct guppi_databuf *db;
    db = guppi_databuf_attach(args->input_buffer);
    if (db==NULL) {
        guppi_error("guppi_null_thread",
                "Error attaching to databuf shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)guppi_databuf_detach, db);

    /* Loop */
    char *ptr;
    struct guppi_params gp;
#if FITS_TYPE == PSRFITS
    struct psrfits pf;
    pf.sub.dat_freqs = NULL;
    pf.sub.dat_weights = NULL;
    pf.sub.dat_offsets = NULL;
    pf.sub.dat_scales = NULL;
    pthread_cleanup_push((void *)guppi_free_psrfits, &pf);
#else
    struct sdfits pf;
    pthread_cleanup_push((void *)guppi_free_sdfits, &pf);
#endif
    int curblock=0;
    signal(SIGINT,cc);
    while (run_threads) {

        /* Note waiting status */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        /* Wait for buf to have data */
        rv = guppi_databuf_wait_filled(db, curblock);
        if (rv!=0) {
            //sleep(1);
            continue;
        }

        /* Note waiting status, current block */
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "discarding");
        hputi4(st.buf, "DSKBLKIN", curblock);
        guppi_status_unlock_safe(&st);

        /* Get params */
        ptr = guppi_databuf_header(db, curblock);
        guppi_read_obs_params(ptr, &gp, &pf);

        /* Output if data was lost */
#if FITS_TYPE == PSRFITS
        if (gp.n_dropped!=0 && 
                (gp.packetindex==0 || strcmp(pf.hdr.obs_mode,"SEARCH"))) {
            printf("Block beginning with pktidx=%lld dropped %d packets\n",
                    gp.packetindex, gp.n_dropped);
            fflush(stdout);
        }
#else
        if (gp.num_pkts_dropped!=0 && gp.num_pkts_rcvd!=0) {
            printf("Block received %d packets and dropped %d packets\n",
                    gp.num_pkts_rcvd, gp.num_pkts_dropped);
            fflush(stdout);
        }
#endif

        /* Mark as free */
        guppi_databuf_set_free(db, curblock);

        /* Go to next block */
        curblock = (curblock + 1) % db->n_block;

        /* Check for cancel */
        pthread_testcancel();

    }

    pthread_exit(NULL);

    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes guppi_free_psrfits */
    pthread_cleanup_pop(0); /* Closes guppi_status_detach */
    pthread_cleanup_pop(0); /* Closes guppi_databuf_detach */

}
Example n. 26
0
static void *run(hashpipe_thread_args_t * args)
{
    s6_input_databuf_t *db  = (s6_input_databuf_t *)args->obuf;
    hashpipe_status_t *p_st = &(args->st);

    hashpipe_status_t st = args->st;
    const char * status_key = args->thread_desc->skey;

    //s6_input_block_t fake_data_block;

    /* Main loop */
    int i, rv;
    uint64_t mcnt = 0;
    uint64_t num_coarse_chan = N_COARSE_CHAN;
    uint64_t *data;
    int block_idx = 0;
    int error_count = 0, max_error_count = 0;
    float error, max_error = 0.0;
    int gen_fake = 0;

    hashpipe_status_lock_safe(&st);
    //hashpipe_status_lock_safe(p_st);
    hputi4(st.buf, "NUMCCHAN", N_COARSE_CHAN);
    hputi4(st.buf, "NUMFCHAN", N_FINE_CHAN);
    hputi4(st.buf, "NUMBBEAM", N_BYTES_PER_BEAM);
    hputi4(st.buf, "NUMBBLOC", sizeof(s6_input_block_t));
    hputi4(st.buf, "THRESHLD", POWER_THRESH);
    hgeti4(st.buf, "GENFAKE", &gen_fake);
    hashpipe_status_unlock_safe(&st);
    //hashpipe_status_unlock_safe(p_st);

    time_t t, prior_t;
    prior_t = time(&prior_t);

    while (run_threads()) {

        hashpipe_status_lock_safe(&st);
        //hashpipe_status_lock_safe(p_st);
        hputi4(st.buf, "NETBKOUT", block_idx);
        hputs(st.buf, status_key, "waiting");
        hashpipe_status_unlock_safe(&st);
        //hashpipe_status_unlock_safe(p_st);
 
        t = time(&t);
        fprintf(stderr, "elapsed seconds for block %d : %ld\n", block_idx, t - prior_t);
        prior_t = t;
        // Wait for data
        struct timespec sleep_dur, rem_sleep_dur;
        sleep_dur.tv_sec = 1;
        sleep_dur.tv_nsec = 0;
        //fprintf(stderr, "fake net thread sleeping for %7.5f seconds\n", 
        //        sleep_dur.tv_sec + (double)sleep_dur.tv_nsec/1000000000.0);
        nanosleep(&sleep_dur, &rem_sleep_dur);
	
        /* Wait for new block to be free, then clear it
         * if necessary and fill its header with new values.
         */
        while ((rv=s6_input_databuf_wait_free(db, block_idx)) 
                != HASHPIPE_OK) {
            if (rv==HASHPIPE_TIMEOUT) {
                hashpipe_status_lock_safe(&st);
                hputs(st.buf, status_key, "blocked");
                hashpipe_status_unlock_safe(&st);
                continue;
            } else {
                hashpipe_error(__FUNCTION__, "error waiting for free databuf");
                pthread_exit(NULL);
                break;
            }
        }

        hashpipe_status_lock_safe(&st);
        hputs(st.buf, status_key, "receiving");
        hashpipe_status_unlock_safe(&st);
 
        // populate block header
        db->block[block_idx].header.mcnt = mcnt;
        db->block[block_idx].header.coarse_chan_id = 321;
        db->block[block_idx].header.num_coarse_chan = num_coarse_chan;
        memset(db->block[block_idx].header.missed_pkts, 0, sizeof(uint64_t) * N_BEAM_SLOTS);

        if(gen_fake) {
            gen_fake = 0;
            // gen fake data for all beams, all blocks   
            // TODO vary data by beam
            fprintf(stderr, "generating fake data to block 0 beam 0...");
            gen_fake_data(&(db->block[0].data[0]));
            fprintf(stderr, " done\n");
            fprintf(stderr, "copying to block 0 beam");
            for(int beam_i = 1; beam_i < N_BEAMS; beam_i++) {
                fprintf(stderr, " %d", beam_i);
                memcpy((void *)&db->block[0].data[beam_i*N_BYTES_PER_BEAM/sizeof(uint64_t)], 
                    (void *)&db->block[0].data[0], 
                    N_BYTES_PER_BEAM);
            }
            fprintf(stderr, " done\n");
            fprintf(stderr, "copying to block");
            for(int block_i = 1; block_i < N_INPUT_BLOCKS; block_i++) {
                fprintf(stderr, " %d", block_i);
                memcpy((void *)&db->block[block_i].data[0], 
                    (void *)&db->block[0].data[0], 
                    N_DATA_BYTES_PER_BLOCK);
            }
            fprintf(stderr, " done\n");
        }

        hashpipe_status_lock_safe(&st);
        hputr4(st.buf, "NETMXERR", max_error);
        hputi4(st.buf, "NETERCNT", error_count);
        hputi4(st.buf, "NETMXECT", max_error_count);
        hashpipe_status_unlock_safe(&st);

        // Mark block as full
        s6_input_databuf_set_filled(db, block_idx);

        // Setup for next block
        block_idx = (block_idx + 1) % db->header.n_block;
        mcnt++;
        // uncomment the following to test dynamic setting of num_coarse_chan
        //num_coarse_chan--;

        /* Will exit if thread has been cancelled */
        pthread_testcancel();
    }

    // Thread success!
    return THREAD_OK;
}
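The fake-data path above generates one beam's worth of data once, then fans it out with memcpy: beam 0 is copied to the remaining beams of block 0, and block 0 is copied to the remaining blocks. A toy model of that fan-out, with tiny made-up dimensions standing in for N_BEAMS, N_INPUT_BLOCKS, and the per-beam byte counts.

#include <stdio.h>
#include <string.h>

#define N_BLOCKS   4   /* stand-in for N_INPUT_BLOCKS */
#define N_BEAMS    3   /* stand-in for N_BEAMS */
#define N_PER_BEAM 8   /* stand-in for N_BYTES_PER_BEAM / sizeof(uint64_t) */

static unsigned long data[N_BLOCKS][N_BEAMS][N_PER_BEAM];

int main(void)
{
    int i;
    for (i = 0; i < N_PER_BEAM; i++)      /* "gen_fake_data" stand-in */
        data[0][0][i] = 1000 + i;

    for (i = 1; i < N_BEAMS; i++)         /* copy beam 0 to the other beams */
        memcpy(data[0][i], data[0][0], sizeof(data[0][0]));

    for (i = 1; i < N_BLOCKS; i++)        /* copy block 0 to the other blocks */
        memcpy(data[i], data[0], sizeof(data[0]));

    printf("data[%d][%d][%d] = %lu\n", N_BLOCKS - 1, N_BEAMS - 1, N_PER_BEAM - 1,
           data[N_BLOCKS - 1][N_BEAMS - 1][N_PER_BEAM - 1]);
    return 0;
}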
Example n. 27
0
/* This thread is passed a single arg, pointer
 * to the vegas_udp_params struct.  This thread should 
 * be cancelled and restarted if any hardware params
 * change, as this potentially affects packet size, etc.
 */
void *vegas_net_thread(void *_args) {

    /* Get arguments */
    struct vegas_thread_args *args = (struct vegas_thread_args *)_args;
    int rv;

    /* Set cpu affinity */
    cpu_set_t cpuset, cpuset_orig;
    sched_getaffinity(0, sizeof(cpu_set_t), &cpuset_orig);
    //CPU_ZERO(&cpuset);
    CPU_SET(13, &cpuset);
    rv = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
    if (rv<0) { 
        vegas_error("vegas_net_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv = setpriority(PRIO_PROCESS, 0, args->priority);
    if (rv<0) {
        vegas_error("vegas_net_thread", "Error setting priority level.");
        perror("set_priority");
    }

    /* Attach to status shared mem area */
    struct vegas_status st;
    rv = vegas_status_attach(&st);
    if (rv!=VEGAS_OK) {
        vegas_error("vegas_net_thread", 
                "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)vegas_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);

    /* Init status, read info */
    vegas_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    vegas_status_unlock_safe(&st);

    /* Read in general parameters */
    struct vegas_params gp;
    struct sdfits pf;
    char status_buf[VEGAS_STATUS_SIZE];
    vegas_status_lock_safe(&st);
    memcpy(status_buf, st.buf, VEGAS_STATUS_SIZE);
    vegas_status_unlock_safe(&st);
    vegas_read_obs_params(status_buf, &gp, &pf);
    pthread_cleanup_push((void *)vegas_free_sdfits, &pf);

    /* Read network params */
    struct vegas_udp_params up;
    vegas_read_net_params(status_buf, &up);

    /* Attach to databuf shared mem */
    struct vegas_databuf *db;
    db = vegas_databuf_attach(args->output_buffer); 
    if (db==NULL) {
        vegas_error("vegas_net_thread",
                "Error attaching to databuf shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)vegas_databuf_detach, db);

    /* Time parameters */
    double meas_stt_mjd=0.0;
    double meas_stt_offs=0.0;

    /* See which packet format to use */
    int nchan=0, npol=0;
    nchan = pf.hdr.nchan;
    npol = pf.hdr.npol;

    /* Figure out size of data in each packet, number of packets
     * per block, etc.  Changing packet size during an obs is not
     * recommended.
     */
    int block_size;
    struct vegas_udp_packet p;
    size_t heap_size, spead_hdr_size;
    unsigned int heaps_per_block, packets_per_heap; 
    char bw_mode[16];

    if (hgets(status_buf, "BW_MODE", 16, bw_mode))
    {
        if(strncmp(bw_mode, "high", 4) == 0)
        {
            heap_size = sizeof(struct freq_spead_heap) + nchan*4*sizeof(int);
            spead_hdr_size = sizeof(struct freq_spead_heap);
            packets_per_heap = nchan*4*sizeof(int) / PAYLOAD_SIZE;
        }
        else if(strncmp(bw_mode, "low", 3) == 0)
        {
            heap_size = sizeof(struct time_spead_heap) + PAYLOAD_SIZE;
            spead_hdr_size = sizeof(struct time_spead_heap);
            packets_per_heap = 1;
        }
        else
            vegas_error("vegas_net_thread", "Unsupported bandwidth mode");
    }
    else
        vegas_error("vegas_net_thread", "BW_MODE not set");

    if (hgeti4(status_buf, "BLOCSIZE", &block_size)==0) {
            block_size = db->block_size;
            hputi4(status_buf, "BLOCSIZE", block_size);
    } else {
        if (block_size > db->block_size) {
            vegas_error("vegas_net_thread", "BLOCSIZE > databuf block_size");
            block_size = db->block_size;
            hputi4(status_buf, "BLOCSIZE", block_size);
        }
    }
    heaps_per_block =   (block_size - MAX_HEAPS_PER_BLK*spead_hdr_size) /
                        (heap_size - spead_hdr_size);

    /* List of databuf blocks currently in use */
    unsigned i;
    const int nblock = 2;
    struct datablock_stats blocks[nblock];
    for (i=0; i<nblock; i++) 
        init_block(&blocks[i], db, heap_size, spead_hdr_size, heaps_per_block);

    /* Convenience names for first/last blocks in set */
    struct datablock_stats *fblock, *lblock;
    fblock = &blocks[0];
    lblock = &blocks[nblock-1];

    /* Misc counters, etc */
    char *curdata=NULL, *curheader=NULL, *curindex=NULL;
    unsigned int heap_cntr=0, last_heap_cntr=2048, nextblock_heap_cntr=0;
    unsigned int heap_offset;
    unsigned int seq_num=0, last_seq_num=1050;
    int heap_cntr_diff, seq_num_diff;
    unsigned int obs_started = 0;
    unsigned long long npacket_total=0, npacket_this_block=0, ndropped_total=0;
    double drop_frac_avg=0.0;
    const double drop_lpf = 0.25;
    prev_heap_cntr = 0;
    prev_heap_offset = 0;
    char msg[256];

    /* Give all the threads a chance to start before opening network socket */
    sleep(1);

    /* Set up UDP socket */
    rv = vegas_udp_init(&up);
    if (rv!=VEGAS_OK) {
        vegas_error("vegas_net_thread",
                "Error opening UDP socket.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)vegas_udp_close, &up);

    /* Main loop */
    unsigned force_new_block=0, waiting=-1;
    signal(SIGINT,cc);
    while (run) {

        /* Wait for data */
        rv = vegas_udp_wait(&up);
        if (rv!=VEGAS_OK) {
            if (rv==VEGAS_TIMEOUT) { 
                /* Set "waiting" flag */
                if (waiting!=1) {
                    vegas_status_lock_safe(&st);
                    hputs(st.buf, STATUS_KEY, "waiting");
                    vegas_status_unlock_safe(&st);
                    waiting=1;
                }
                continue; 
            } else {
                vegas_error("vegas_net_thread", 
                        "vegas_udp_wait returned error");
                perror("vegas_udp_wait");
                pthread_exit(NULL);
            }
        }
	
        /* Read packet */
        rv = vegas_udp_recv(&up, &p, bw_mode);
        if (rv!=VEGAS_OK) {
            if (rv==VEGAS_ERR_PACKET) {
                #ifdef DEBUG_NET
                vegas_warn("vegas_net_thread", "Incorrect pkt size");
                #endif
                continue; 
            } else {
                vegas_error("vegas_net_thread", 
                        "vegas_udp_recv returned error");
                perror("vegas_udp_recv");
                pthread_exit(NULL);
            }
        }
	
        /* Update status if needed */
        if (waiting!=0) {
            vegas_status_lock_safe(&st);
            hputs(st.buf, STATUS_KEY, "receiving");
            vegas_status_unlock_safe(&st);
            waiting=0;
        }

        /* Check seq num diff */
        heap_cntr = vegas_spead_packet_heap_cntr(&p);
        heap_offset = vegas_spead_packet_heap_offset(&p);
        seq_num = vegas_spead_packet_seq_num(heap_cntr, heap_offset, packets_per_heap);

        heap_cntr_diff = heap_cntr - last_heap_cntr;
        seq_num_diff = (int)(seq_num - last_seq_num);
        
        last_seq_num = seq_num;
        last_heap_cntr = heap_cntr;

        if (seq_num_diff<=0) { 

            if (seq_num_diff<-1024)
            {
                force_new_block=1;
                obs_started = 1;

                #ifdef DEBUG_NET
                printf("Debug: observation started\n");
                #endif
            }
            else if (seq_num_diff==0) {
                sprintf(msg, "Received duplicate packet (seq_num=%d)", seq_num);
                vegas_warn("vegas_net_thread", msg);
            }
            else  {
                #ifdef DEBUG_NET
                sprintf(msg, "out of order packet. Diff = %d", seq_num_diff);
                vegas_warn("vegas_net_thread", msg);
                #endif
                continue;   /* No going backwards */
            }
        } else { 
            force_new_block=0; 
            npacket_total += seq_num_diff;
            ndropped_total += seq_num_diff - 1;
            npacket_this_block += seq_num_diff;
            fblock->pkts_dropped += seq_num_diff - 1;

            #ifdef DEBUG_NET
            if(seq_num_diff > 1)
            {
                sprintf(msg, "Missing packet. seq_num_diff = %d", seq_num_diff);
                vegas_warn("vegas_net_thread", msg);
            }
            #endif

        }

        /* If obs has not started, ignore this packet */
        if(!obs_started)
        {
            fblock->pkts_dropped = 0;
            npacket_total = 0;
            ndropped_total = 0;
            npacket_this_block = 0;

            continue;
        }

        /* Determine if we go to next block */
        if (heap_cntr>=nextblock_heap_cntr || force_new_block)
        {
            /* Update drop stats */
            if (npacket_this_block > 0)  
                drop_frac_avg = (1.0-drop_lpf)*drop_frac_avg 
                    + drop_lpf *
                    (double)fblock->pkts_dropped / (double)npacket_this_block;

            vegas_status_lock_safe(&st);
            hputi8(st.buf, "NPKT", npacket_total);
            hputi8(st.buf, "NDROP", ndropped_total);
            hputr8(st.buf, "DROPAVG", drop_frac_avg);
            hputr8(st.buf, "DROPTOT", 
                    npacket_total ? 
                    (double)ndropped_total/(double)npacket_total 
                    : 0.0);
            hputi4(st.buf, "NETBLKOU", fblock->block_idx);
            vegas_status_unlock_safe(&st);
            
            /* Finalize first block, and push it off the list.
             * Then grab next available block.
             */
            if (fblock->block_idx>=0) finalize_block(fblock);
            block_stack_push(blocks, nblock);
            increment_block(lblock, heap_cntr);
            curdata = vegas_databuf_data(db, lblock->block_idx);
            curheader = vegas_databuf_header(db, lblock->block_idx);
            curindex = vegas_databuf_index(db, lblock->block_idx);
            nextblock_heap_cntr = lblock->heap_idx + heaps_per_block;
            npacket_this_block = 0;

            /* If new obs started, reset total counters, get start
             * time.  Start time is rounded to nearest integer
             * second, with warning if we're off that by more
             * than 100ms.  Any current blocks on the stack
             * are also finalized/reset */            

            if (force_new_block) {
            
                /* Reset stats */
                npacket_total=0;
                ndropped_total=0;
                npacket_this_block = 0;

                /* Get obs start time */
                get_current_mjd_double(&meas_stt_mjd);
                
                printf("vegas_net_thread: got start packet at MJD %f", meas_stt_mjd);
                
                meas_stt_offs = meas_stt_mjd*24*60*60 - floor(meas_stt_mjd*24*60*60);

                if(meas_stt_offs > 0.1 && meas_stt_offs < 0.9)
                { 
                    char msg[256];
                    sprintf(msg, 
                            "Second fraction = %3.1f ms > +/-100 ms",
                            meas_stt_offs*1e3);
                    vegas_warn("vegas_net_thread", msg);
                }

                vegas_status_lock_safe(&st);
                hputnr8(st.buf, "M_STTMJD", 8, meas_stt_mjd);
                hputr8(st.buf, "M_STTOFF", meas_stt_offs);
                vegas_status_unlock_safe(&st);

                /* Warn if 1st packet number is not zero */
                if (seq_num!=0) {
                    char msg[256];
                    sprintf(msg, "First packet number is not 0 (seq_num=%d)", seq_num);
                    vegas_warn("vegas_net_thread", msg);
                }
            
            }
            
            /* Read current status shared mem */
            vegas_status_lock_safe(&st);
            memcpy(status_buf, st.buf, VEGAS_STATUS_SIZE);
            vegas_status_unlock_safe(&st);

            /* Wait for new block to be free, then clear it
             * if necessary and fill its header with new values.
             */
            while ((rv=vegas_databuf_wait_free(db, lblock->block_idx)) 
                    != VEGAS_OK) {
                if (rv==VEGAS_TIMEOUT) {
                    waiting=1;
                    vegas_warn("vegas_net_thread", "timeout while waiting for output block\n");
                    vegas_status_lock_safe(&st);
                    hputs(st.buf, STATUS_KEY, "blocked");
                    vegas_status_unlock_safe(&st);
                    continue;
                } else {
                    vegas_error("vegas_net_thread", 
                            "error waiting for free databuf");
                    run=0;
                    pthread_exit(NULL);
                    break;
                }
            }
            memcpy(curheader, status_buf, VEGAS_STATUS_SIZE);
            memset(curdata, 0, block_size);
            memset(curindex, 0, db->index_size);
        }

        /* Copy packet into any blocks where it belongs.
         * The "write packets" functions also update drop stats 
         * for blocks, etc.
         */
        int nblocks = 0;
        for (i=0; i<nblock; i++)
        {
            if ((blocks[i].block_idx>=0) && (block_heap_check(&blocks[i],heap_cntr)==0))
            {
            	if (nblocks > 0) {
            		printf("vegas_net_thread: Warning! Adding packet to more than one block! heap_cntr= %d, block = %d",heap_cntr,i);
            	}
            	nblocks++;
                write_spead_packet_to_block(&blocks[i], &p, heap_cntr,
                                heap_offset, packets_per_heap, bw_mode);
            }
        }

        /* Will exit if thread has been cancelled */
        pthread_testcancel();
    }

    pthread_exit(NULL);

    /* Have to close all push's */
    pthread_cleanup_pop(0); /* Closes push(vegas_udp_close) */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes vegas_free_sdfits */
    pthread_cleanup_pop(0); /* Closes vegas_status_detach */
    pthread_cleanup_pop(0); /* Closes vegas_databuf_detach */
}
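The drop statistics above smooth the per-block drop fraction with a single-pole low-pass filter (an exponential moving average with coefficient drop_lpf = 0.25) before publishing DROPAVG. A self-contained sketch of that filter, fed a few made-up per-block loss fractions.

#include <stdio.h>

/* Minimal sketch of the drop-fraction smoothing used in vegas_net_thread:
 * avg = (1 - lpf) * avg + lpf * (dropped / received) for each block. */
int main(void)
{
    const double drop_lpf = 0.25;
    double drop_frac_avg = 0.0;

    /* pretend three blocks saw 0%, 10% and 2% packet loss */
    double per_block[] = { 0.0, 0.10, 0.02 };
    int i;
    for (i = 0; i < 3; i++) {
        drop_frac_avg = (1.0 - drop_lpf) * drop_frac_avg
                      + drop_lpf * per_block[i];
        printf("block %d: averaged drop fraction %.4f\n", i, drop_frac_avg);
    }
    return 0;
}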
Example n. 28
0
static void *run(void * _args)
{
    // Cast _args
    struct guppi_thread_args *args = (struct guppi_thread_args *)_args;

#ifdef DEBUG_SEMS
    fprintf(stderr, "s/tid %lu/                      FLUFf/\n", pthread_self());
#endif

    THREAD_RUN_BEGIN(args);

    THREAD_RUN_SET_AFFINITY_PRIORITY(args);

    /* Attach to status shared mem area */
    THREAD_RUN_ATTACH_STATUS(args->instance_id, st);

    /* Attach to paper_input_databuf */
    THREAD_RUN_ATTACH_DATABUF(args->instance_id,
        paper_input_databuf, db_in, args->input_buffer);

    /* Attach to paper_gpu_input_databuf */
    THREAD_RUN_ATTACH_DATABUF(args->instance_id,
        paper_gpu_input_databuf, db_out, args->output_buffer);

    // Init status variables
    guppi_status_lock_safe(&st);
    hputi8(st.buf, "FLUFMCNT", 0);
    guppi_status_unlock_safe(&st);

    /* Loop */
    int rv;
    int curblock_in=0;
    int curblock_out=0;

    struct timespec start, finish;

    while (run_threads) {

        // Note waiting status,
        // query integrating status
        // and, if armed, start count
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "waiting");
        guppi_status_unlock_safe(&st);

        // Wait for new input block to be filled
        while ((rv=paper_input_databuf_wait_filled(db_in, curblock_in)) != GUPPI_OK) {
            if (rv==GUPPI_TIMEOUT) {
                guppi_status_lock_safe(&st);
                hputs(st.buf, STATUS_KEY, "blocked_in");
                guppi_status_unlock_safe(&st);
                continue;
            } else {
                guppi_error(__FUNCTION__, "error waiting for filled databuf");
                run_threads=0;
                pthread_exit(NULL);
                break;
            }
        }

        // Wait for new gpu_input block (our output block) to be free
        while ((rv=paper_gpu_input_databuf_wait_free(db_out, curblock_out)) != GUPPI_OK) {
            if (rv==GUPPI_TIMEOUT) {
                guppi_status_lock_safe(&st);
                hputs(st.buf, STATUS_KEY, "blocked gpu input");
                guppi_status_unlock_safe(&st);
                continue;
            } else {
                guppi_error(__FUNCTION__, "error waiting for free databuf");
                run_threads=0;
                pthread_exit(NULL);
                break;
            }
        }

        // Got a new data block, update status
        guppi_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "fluffing");
        hputi4(st.buf, "FLUFBKIN", curblock_in);
        hputu8(st.buf, "FLUFMCNT", db_in->block[curblock_in].header.mcnt);
        guppi_status_unlock_safe(&st);

        // Copy header and call fluff function
        clock_gettime(CLOCK_MONOTONIC, &start);

        memcpy(&db_out->block[curblock_out].header, &db_in->block[curblock_in].header, sizeof(paper_input_header_t));

        paper_fluff(db_in->block[curblock_in].data, db_out->block[curblock_out].data);

        clock_gettime(CLOCK_MONOTONIC, &finish);

        // Note processing time
        guppi_status_lock_safe(&st);
        // Bits per fluff / ns per fluff = Gbps
        hputr4(st.buf, "FLUFGBPS", (float)(8*N_BYTES_PER_BLOCK)/ELAPSED_NS(start,finish));
        guppi_status_unlock_safe(&st);

        // Mark input block as free and advance
        paper_input_databuf_set_free(db_in, curblock_in);
        curblock_in = (curblock_in + 1) % db_in->header.n_block;

        // Mark output block as full and advance
        paper_gpu_input_databuf_set_filled(db_out, curblock_out);
        curblock_out = (curblock_out + 1) % db_out->header.n_block;

        /* Check for cancel */
        pthread_testcancel();
    }
    run_threads=0;

    // Have to close all pushes
    THREAD_RUN_DETACH_DATAUF;
    THREAD_RUN_DETACH_DATAUF;
    THREAD_RUN_DETACH_STATUS;
    THREAD_RUN_END;

    // Thread success!
    return NULL;
}
Example n. 29
0
void vegas_sdfits_thread(void *_args) {
    
    /* Get args */
    struct vegas_thread_args *args = (struct vegas_thread_args *)_args;
    pthread_cleanup_push((void *)vegas_thread_set_finished, args);
    
    /* Set cpu affinity */
    int rv = sched_setaffinity(0, sizeof(cpu_set_t), &args->cpuset);
    if (rv<0) { 
        vegas_error("vegas_sdfits_thread", "Error setting cpu affinity.");
        perror("sched_setaffinity");
    }

    /* Set priority */
    rv=0;
    if (args->priority != 0)
    {
        struct sched_param priority_param;
        priority_param.sched_priority = args->priority;
        rv = pthread_setschedparam(pthread_self(), SCHED_FIFO, &priority_param);
    }
    if (rv<0) {
        vegas_error("vegas_sdfits_thread", "Error setting priority level.");
        perror("set_priority");
    }
    
    /* Attach to status shared mem area */
    struct vegas_status st;
    rv = vegas_status_attach(&st);
    if (rv!=VEGAS_OK) {
        vegas_error("vegas_sdfits_thread", 
                    "Error attaching to status shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)vegas_status_detach, &st);
    pthread_cleanup_push((void *)set_exit_status, &st);
    
    /* Init status */
    vegas_status_lock_safe(&st);
    hputs(st.buf, STATUS_KEY, "init");
    vegas_status_unlock_safe(&st);
    
    /* Initialize some key parameters */
    struct vegas_params gp;
    struct sdfits sf;
    sf.data_columns.data = NULL;
    sf.filenum = 0;
    sf.new_file = 1; // This is crucial
    pthread_cleanup_push((void *)vegas_free_sdfits, &sf);
    pthread_cleanup_push((void *)sdfits_close, &sf);
    //pf.multifile = 0;  // Use a single file for fold mode
    sf.multifile = 1;  // Use multiple output files
    sf.quiet = 0;      // Print a message per each subint written
    
    /* Attach to databuf shared mem */
    struct vegas_databuf *db;
    db = vegas_databuf_attach(args->input_buffer);
    if (db==NULL) {
        vegas_error("vegas_sdfits_thread",
                    "Error attaching to databuf shared memory.");
        pthread_exit(NULL);
    }
    pthread_cleanup_push((void *)vegas_databuf_detach, db);
    
    /* Loop */
    int curblock=0, total_status=0, firsttime=1, run=1, got_packet_0=0, dataset=0;
    char *ptr;
    char tmpstr[256];
    int scan_finished=0, old_filenum;
    int num_exposures_written = 0;
    int old_integ_num = -1;

    signal(SIGINT, cc);
    do {
        /* Note waiting status */
        vegas_status_lock_safe(&st);
        if (got_packet_0)
            sprintf(tmpstr, "waiting(%d)", curblock);
        else
            sprintf(tmpstr, "ready");
        hputs(st.buf, STATUS_KEY, tmpstr);
        vegas_status_unlock_safe(&st);
        
        /* Wait for buf to have data */
        rv = vegas_databuf_wait_filled(db, curblock);
        if (rv!=0) {
            // This is a big ol' kludge to avoid this process hanging
            // due to thread synchronization problems.
            sleep(1);
            continue; 
        }

        /* Note current block */
        vegas_status_lock_safe(&st);
        hputi4(st.buf, "DSKBLKIN", curblock);
        vegas_status_unlock_safe(&st);

        /* See how full databuf is */
        total_status = vegas_databuf_total_status(db);
        
        /* Read param structs for this block */
        ptr = vegas_databuf_header(db, curblock);
        if (firsttime) {
            vegas_read_obs_params(ptr, &gp, &sf);
            firsttime = 0;
        } else {
            vegas_read_subint_params(ptr, &gp, &sf);
        }

        /* Note waiting status */
        vegas_status_lock_safe(&st);
        hputs(st.buf, STATUS_KEY, "writing");
        vegas_status_unlock_safe(&st);

        struct sdfits_data_columns* data_cols;
        struct databuf_index* db_index;

        db_index = (struct databuf_index*)(vegas_databuf_index(db, curblock));

        /* Read the block index, writing each dataset to a SDFITS file */
        for(dataset = 0; dataset < db_index->num_datasets; dataset++)
        {
            data_cols = (struct sdfits_data_columns*)(vegas_databuf_data(db, curblock) +
                        db_index->disk_buf[dataset].struct_offset);

            sf.data_columns = *data_cols;

            /* Write the data */
            old_filenum = sf.filenum;
            sdfits_write_subint(&sf);

            /*Write new file number to shared memory*/
            if(sf.filenum != old_filenum)
            {
                vegas_status_lock_safe(&st);
                hputi4(st.buf, "FILENUM", sf.filenum);
                vegas_status_unlock_safe(&st);
            }

            /* If a new integration number, increment the number of exposures written */
            if(data_cols->integ_num != old_integ_num)
            {
                num_exposures_written += 1;
                old_integ_num = data_cols->integ_num;
            }

        }

        /* Indicate number of exposures written */
        vegas_status_lock_safe(&st);
        hputi4(st.buf, "DSKEXPWR", num_exposures_written);
        vegas_status_unlock_safe(&st);

        /* For debugging... */
        if (gp.drop_frac > 0.0) {
            printf("Block %d dropped %.3g%% of the packets\n", 
                    sf.tot_rows, gp.drop_frac*100.0);
        }

        /* Mark as free */
        vegas_databuf_set_free(db, curblock);
        
        /* Go to next block */
        curblock = (curblock + 1) % db->n_block;
        
        /* Check for cancel */
        pthread_testcancel();
        
    } while (run && !scan_finished);
    
    /* Cleanup */
    pthread_exit(NULL);
    
    pthread_cleanup_pop(0); /* Closes sdfits_close */
    pthread_cleanup_pop(0); /* Closes vegas_free_sdfits */
    pthread_cleanup_pop(0); /* Closes set_exit_status */
    pthread_cleanup_pop(0); /* Closes set_finished */
    pthread_cleanup_pop(0); /* Closes vegas_status_detach */
    pthread_cleanup_pop(0); /* Closes vegas_databuf_detach */
}
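Each disk block above carries a small index describing the datasets packed into it: a count plus a struct_offset per dataset, which the loop uses to locate each sdfits_data_columns header inside the block's data region. A toy version of that walk; the struct layouts here are illustrative stand-ins, not the real vegas databuf index definitions.

#include <stdio.h>

/* Illustrative stand-ins for the block index used above. */
struct toy_index_entry { unsigned long struct_offset; };
struct toy_index {
    int num_datasets;
    struct toy_index_entry disk_buf[4];
};

int main(void)
{
    static char block_data[256];  /* stand-in for the databuf data region */
    struct toy_index idx = { 2, { { 0 }, { 128 }, { 0 }, { 0 } } };
    int dataset;

    for (dataset = 0; dataset < idx.num_datasets; dataset++) {
        char *hdr = block_data + idx.disk_buf[dataset].struct_offset;
        printf("dataset %d header at offset %lu (%p)\n",
               dataset, idx.disk_buf[dataset].struct_offset, (void *)hdr);
    }
    return 0;
}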
Example n. 30
0
static void *run(hashpipe_thread_args_t * args)
{
    // Local aliases to shorten access to args fields
    // Our input buffer is a paper_input_databuf
    // Our output buffer is a paper_gpu_input_databuf
    paper_input_databuf_t *db_in = (paper_input_databuf_t *)args->ibuf;
    paper_gpu_input_databuf_t *db_out = (paper_gpu_input_databuf_t *)args->obuf;
    hashpipe_status_t st = args->st;
    const char * status_key = args->thread_desc->skey;

#ifdef DEBUG_SEMS
    fprintf(stderr, "s/tid %lu/                      FLUFf/\n", pthread_self());
#endif

    // Init status variables
    hashpipe_status_lock_safe(&st);
    hputi8(st.buf, "FLUFMCNT", 0);
    hashpipe_status_unlock_safe(&st);

    /* Loop */
    int rv;
    int curblock_in=0;
    int curblock_out=0;
    float gbps, min_gbps = 0.0;  // 0.0 until FLUFMING has been recorded

    struct timespec start, finish;

    while (run_threads()) {

        // Note waiting status,
        // query integrating status
        // and, if armed, start count
        hashpipe_status_lock_safe(&st);
        hputs(st.buf, status_key, "waiting");
        hashpipe_status_unlock_safe(&st);

        // Wait for new input block to be filled
        while ((rv=paper_input_databuf_wait_filled(db_in, curblock_in)) != HASHPIPE_OK) {
            if (rv==HASHPIPE_TIMEOUT) {
                hashpipe_status_lock_safe(&st);
                hputs(st.buf, status_key, "blocked_in");
                hashpipe_status_unlock_safe(&st);
                continue;
            } else {
                hashpipe_error(__FUNCTION__, "error waiting for filled databuf");
                pthread_exit(NULL);
                break;
            }
        }

        // Wait for new gpu_input block (our output block) to be free
        while ((rv=paper_gpu_input_databuf_wait_free(db_out, curblock_out)) != HASHPIPE_OK) {
            if (rv==HASHPIPE_TIMEOUT) {
                hashpipe_status_lock_safe(&st);
                hputs(st.buf, status_key, "blocked gpu input");
                hashpipe_status_unlock_safe(&st);
                continue;
            } else {
                hashpipe_error(__FUNCTION__, "error waiting for free databuf");
                pthread_exit(NULL);
                break;
            }
        }

        // Got a new data block, update status
        hashpipe_status_lock_safe(&st);
        hputs(st.buf, status_key, "fluffing");
        hputi4(st.buf, "FLUFBKIN", curblock_in);
        hputu8(st.buf, "FLUFMCNT", db_in->block[curblock_in].header.mcnt);
        hashpipe_status_unlock_safe(&st);

        // Copy header and call fluff function
        clock_gettime(CLOCK_MONOTONIC, &start);

        memcpy(&db_out->block[curblock_out].header, &db_in->block[curblock_in].header, sizeof(paper_input_header_t));

        paper_fluff(db_in->block[curblock_in].data, db_out->block[curblock_out].data);

        clock_gettime(CLOCK_MONOTONIC, &finish);

        // Note processing time
        hashpipe_status_lock_safe(&st);
        // Bits per fluff / ns per fluff = Gbps
        hgetr4(st.buf, "FLUFMING", &min_gbps);
        gbps = (float)(8*N_BYTES_PER_BLOCK)/ELAPSED_NS(start,finish);
        hputr4(st.buf, "FLUFGBPS", gbps);
        if(min_gbps == 0 || gbps < min_gbps) {
          hputr4(st.buf, "FLUFMING", gbps);
        }
        hashpipe_status_unlock_safe(&st);

        // Mark input block as free and advance
        paper_input_databuf_set_free(db_in, curblock_in);
        curblock_in = (curblock_in + 1) % db_in->header.n_block;

        // Mark output block as full and advance
        paper_gpu_input_databuf_set_filled(db_out, curblock_out);
        curblock_out = (curblock_out + 1) % db_out->header.n_block;

        /* Check for cancel */
        pthread_testcancel();
    }

    // Thread success!
    return NULL;
}
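The Gbps figures published above come directly from bits-per-block divided by elapsed nanoseconds, since 1 bit/ns equals 1 Gbit/s. A standalone sketch of that measurement around a memcpy standing in for paper_fluff(); the ELAPSED_NS macro is reproduced here on the assumption that it is the usual monotonic-clock difference, and the block size is made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

/* Assumed definition: nanoseconds between two CLOCK_MONOTONIC timestamps. */
#define ELAPSED_NS(start, stop) \
    (((int64_t)((stop).tv_sec - (start).tv_sec)) * 1000000000LL + \
     ((stop).tv_nsec - (start).tv_nsec))

#define N_BYTES_PER_BLOCK (32UL * 1024 * 1024)  /* illustrative block size */

int main(void)
{
    static char src[N_BYTES_PER_BLOCK], dst[N_BYTES_PER_BLOCK];
    struct timespec start, finish;

    clock_gettime(CLOCK_MONOTONIC, &start);
    memcpy(dst, src, N_BYTES_PER_BLOCK);        /* stand-in for paper_fluff() */
    clock_gettime(CLOCK_MONOTONIC, &finish);

    /* bits per block / ns per block = Gbit/s */
    float gbps = (float)(8UL * N_BYTES_PER_BLOCK) / ELAPSED_NS(start, finish);
    printf("approx %.2f Gbit/s\n", gbps);
    return 0;
}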