Ejemplo n.º 1
0
/* Scale an incoming YUV422P frame by 1/Nx x 1/Ny and post the result.
 * With Nx == Ny == 1 the input buffer is forwarded untouched; with no
 * destination connected the frame is simply dropped. */
static void y422p_handler(Instance *pi, void *data)
{
  NScale_private *priv = (NScale_private *)pi;
  YUV422P_buffer *y422p_in = data;

  if (!pi->outputs[OUTPUT_YUV422P].destination) {
    /* Nobody downstream: drop the frame. */
    YUV422P_buffer_release(y422p_in);
    return;
  }

  if (priv->Nx == 1 && priv->Ny == 1) {
    /* 1:1 scaling: forward the input buffer as-is.  Ownership moves
       to the destination, so no release here. */
    PostData(y422p_in, pi->outputs[OUTPUT_YUV422P].destination);
    return;
  }

  /* Allocate a reduced-size buffer and scale each plane into it. */
  YUV422P_buffer *scaled = YUV422P_buffer_new(y422p_in->width/priv->Nx,
                                              y422p_in->height/priv->Ny,
                                              &y422p_in->c);
  N_scale(priv, y422p_in->y, y422p_in->width, y422p_in->height,
          scaled->y, scaled->width, scaled->height);
  N_scale(priv, y422p_in->cr, y422p_in->cr_width, y422p_in->cr_height,
          scaled->cr, scaled->cr_width, scaled->cr_height);
  N_scale(priv, y422p_in->cb, y422p_in->cb_width, y422p_in->cb_height,
          scaled->cb, scaled->cb_width, scaled->cb_height);
  dpf("posting %dx%d image to output\n", scaled->width, scaled->height);
  scaled->c.timestamp = y422p_in->c.timestamp;
  PostData(scaled, pi->outputs[OUTPUT_YUV422P].destination);

  YUV422P_buffer_release(y422p_in);
}
Ejemplo n.º 2
0
  /** 
   * End-of-job processing: fetch the summed list, normalise the
   * cached histograms into a fresh "triggerResults" list, record the
   * vertex/tracklet requirements, and let each trigger type finish.
   */
  void Terminate(Option_t*)
  {
    fList = dynamic_cast<TList*>(GetOutputData(1));
    if (!fList) {
      AliError(Form("No output list defined (%p)", GetOutputData(1)));
      if (GetOutputData(1)) GetOutputData(1)->Print();
      return;
    }

    // Container for the processed results.
    TList* results = new TList;
    results->SetName("triggerResults");
    results->SetOwner();

    fVertexMC  = static_cast<TH1D*>(fList->FindObject("vertexMC"));
    fVertexESD = static_cast<TH1D*>(fList->FindObject("vertexESD"));
    fM         = static_cast<TH1D*>(fList->FindObject("m"));

    // MC vertex distribution, normalised to unit integral (or zeroed
    // when empty).
    if (fVertexMC) {
      TH1D* hVzMc = static_cast<TH1D*>(fVertexMC->Clone("vertexMC"));
      hVzMc->SetDirectory(0);
      Double_t nMc = hVzMc->GetEntries();
      hVzMc->Scale(nMc > 0 ? 1. / nMc : 0.);
      results->Add(hVzMc);
    }
    // Same for the ESD vertex distribution.
    if (fVertexESD) {
      TH1D* hVzEsd = static_cast<TH1D*>(fVertexESD->Clone("vertexESD"));
      hVzEsd->SetDirectory(0);
      Double_t nEsd = hVzEsd->GetEntries();
      hVzEsd->Scale(nEsd > 0 ? 1. / nEsd : 0.);
      results->Add(hVzEsd);
    }
    // Multiplicity distribution, normalised to the "all" bin so each
    // bin reads as a probability.
    if (fM) {
      TH1D* hM = static_cast<TH1D*>(fM->Clone("m"));
      hM->SetDirectory(0);
      hM->SetYTitle("P(N_{ch}|_{|#eta|<1} < X)");
      Double_t norm = hM->GetBinContent(1);
      hM->Scale(norm > 0 ? 1. / norm : 0.);
      results->Add(hM);
    }

    // Record which vertex/tracklet requirements were in effect.
    TString vtxReq;
    if (fVertexRequirement & kMC)  vtxReq.Append("MC ");
    if (fVertexRequirement & kESD) vtxReq.Append("ESD ");
    results->Add(new TNamed("vtxReq", vtxReq.Data()));
    results->Add(new TNamed("trkReq",
                            fTrackletRequirement == kMC ? "MC" : "ESD"));

    // Let each trigger type finish its own bookkeeping.
    fInel.Finish(fList, results);
    fInelGt0.Finish(fList, results);
    fNSD.Finish(fList, results);
    fNClusterGt0.Finish(fList, results);

    PostData(2, results);
  }
Ejemplo n.º 3
0
/* Apply the configured trim and top/bottom crops to an RGB3 frame,
 * then forward it (or drop it when no destination is connected). */
static void RGB3_handler(Instance *pi, void *msg)
{
  VFilter_private *priv = (VFilter_private *)pi;
  RGB3_buffer *rgb3 = msg;

  /* Optional in-place trim. */
  if (priv->trim) {
    single_trim(priv, rgb3->data, rgb3->data, rgb3->width*3, rgb3->height);
  }

  /* Crop rows off the top: copy everything below the cropped region
     into a shorter replacement buffer. */
  if (priv->top_crop) {
    RGB3_buffer *cropped = RGB3_buffer_new(rgb3->width, rgb3->height - priv->top_crop, &rgb3->c);
    memcpy(cropped->data, rgb3->data + (rgb3->width * priv->top_crop * 3), cropped->data_length);
    RGB3_buffer_release(rgb3);
    rgb3 = cropped;
  }

  /* Crop rows off the bottom: keep only the leading rows. */
  if (priv->bottom_crop) {
    RGB3_buffer *cropped = RGB3_buffer_new(rgb3->width, rgb3->height - priv->bottom_crop, &rgb3->c);
    memcpy(cropped->data, rgb3->data, cropped->width * 3 * cropped->height);
    RGB3_buffer_release(rgb3);
    rgb3 = cropped;
  }

  /* Hand the (possibly replaced) buffer downstream, or drop it. */
  if (!pi->outputs[OUTPUT_RGB3].destination) {
    RGB3_buffer_release(rgb3);
    return;
  }
  PostData(rgb3, pi->outputs[OUTPUT_RGB3].destination);
}
Ejemplo n.º 4
0
// Convert pData (ANSI, NUL-terminated) to UTF-8, base64-encode it,
// URL-encode the result, and POST it as "xml=..." to lpszUrl.
// dwTimeOut in (0, 60000] ms is honoured; anything else falls back to
// 60 s.
//
// Fixes vs. the previous revision:
//  - pWchar was leaked when the UTF-8 length query failed.
//  - The base64 output buffer was sized len*2+1, which under-allocates
//    for short inputs (base64 needs 4*ceil(len/3)+1, e.g. 1 byte in ->
//    5 bytes out incl. terminator); pszOut[dwSize]=0 could then write
//    past the end.
//  - WideCharToMultiByte was called with the UTF-8 byte count as the
//    wide-character count (over-reading pWchar for multi-byte text)
//    and with FALSE smuggled in as the LPBOOL parameter.
void CPostData::sendData(IN LPCTSTR lpszUrl, IN LPSTR pData, IN DWORD dwLen, IN DWORD dwTimeOut)
{
	ATLASSERT(NULL != pData && dwLen > 0);
	if (NULL == pData || dwLen <= 0)
		return;
	if (dwTimeOut > 0 && dwTimeOut <= 60000)
		m_dwTimeOut = dwTimeOut;
	else
		m_dwTimeOut = 60000;

	// ANSI -> wide characters.
	int nLength = MultiByteToWideChar(CP_ACP, 0, (char*)pData, -1, 0, 0);
	if (nLength <= 0)
		return;
	WCHAR *pWchar = new WCHAR[nLength + 1];
	memset(pWchar, 0, sizeof(WCHAR)*(nLength + 1));
	MultiByteToWideChar(CP_ACP, 0, (char*)pData, -1, pWchar, nLength);

	// Wide characters -> UTF-8.
	nLength = WideCharToMultiByte(CP_UTF8, 0, pWchar, -1, NULL, 0, NULL, NULL);
	if (nLength <= 0) {
		delete []pWchar;	// BUGFIX: was leaked on this early-return path
		return;
	}
	char* pStrData = new char[nLength + 1];
	memset(pStrData, 0, sizeof(char)*(nLength + 1));
	// -1 => convert the whole NUL-terminated wide string; the query
	// above (also with -1) returned a length that includes the NUL.
	::WideCharToMultiByte(CP_UTF8, 0, pWchar, -1, pStrData, nLength + 1, NULL, NULL);

	delete []pWchar;

	std::string allXmlStr = pStrData;
	delete []pStrData;

	// base64_encode() takes a writable buffer, so keep a mutable copy.
	DWORD dwBufLen = (DWORD)allXmlStr.size() + 32;
	LPSTR lpszXmlInfo = new char[dwBufLen];
	strcpy_s(lpszXmlInfo, dwBufLen, allXmlStr.c_str());

	// Base64 worst case: 4 output bytes per 3 input bytes, plus
	// padding and the terminator written below.
	size_t srcLen = strlen(lpszXmlInfo);
	int dwSize = (int)(((srcLen + 2) / 3) * 4 + 4);
	unsigned char* pszOut = new unsigned char[dwSize + 1];
	base64_encode((LPBYTE)lpszXmlInfo, srcLen, pszOut, &dwSize);
	pszOut[dwSize] = 0;

	std::string info = "xml=";
	info += UrlEncode((char *)pszOut);

	delete []pszOut;
	delete []lpszXmlInfo;

	PostData(lpszUrl, (LPVOID)info.c_str(), info.size());
}
Ejemplo n.º 5
0
/* Clone and post the stored jpeg buffer N times, N = atoi(value).
 * Returns 0 on success, 1 when there is no jpeg data or destination.
 * Posts a "quit" config message afterwards if OUTPUT_CONFIG is wired. */
static int do_run(Instance *pi, const char *value)
{
  JpegSource_private *priv = (JpegSource_private *)pi;
  int n = atoi(value);
  int i;

  if (!priv->jpeg) {
    fprintf(stderr, "JpegSource has no jpeg data!\n");
    return 1;
  }

  if (!pi->outputs[OUTPUT_JPEG].destination) {
    fprintf(stderr, "JpegSource has no destination!\n");
    return 1;
  }

  Image_common c = { .label = priv->label };

  for (i = 0; i < n; i++) {
    Jpeg_buffer *clone = Jpeg_buffer_from(priv->jpeg->data, priv->jpeg->encoded_length, &c);
    dpf("%d/%d (%d)\n", i, n,
        pi->outputs[OUTPUT_JPEG].destination->parent->pending_messages);

    /* Simple backpressure: sleep 25ms while the consumer is behind. */
    while (pi->outputs[OUTPUT_JPEG].destination->parent->pending_messages > 5) {
      nanosleep(&(struct timespec){.tv_sec = 0, .tv_nsec = 25 * 1000 * 1000}, NULL);
    }

    PostData(clone, pi->outputs[OUTPUT_JPEG].destination);
  }

  if (pi->outputs[OUTPUT_CONFIG].destination) {
    PostData(Config_buffer_new("quit", "0"), pi->outputs[OUTPUT_CONFIG].destination);
  }

  return 0;
}
Ejemplo n.º 6
0
bool
CHttp::SendData (char * data, unsigned long datalen, bool isbase64encoded, int * status, bool checkauth)
{

    DWORD           count = 0;

	if (!data )
		return false;

	/* Alloc HINTERNET handles */
	if ( !AllocHandles ( isbase64encoded, status, checkauth ) )
		return false;

retry:

	if (!PostData(data, datalen ) )
		return false;

	if (!CheckError (status ) )
    {

        if(count < 3 )
        {
            count++;
            goto retry;
        }

		return false;
    }

    count = 0;

    if (*status == HTTP_STATUS_DENIED || *status == HTTP_STATUS_PROXY_AUTH_REQ ) 
    {

        count++;

        if (count > 3 )
            return false;

        if (SetAuthDetails (m_HttpOpenRequest, *status ))
            goto retry;

    }

	return true;
}
Ejemplo n.º 7
0
/* Overlay priv->img onto each incoming YUV420P frame (only when the
 * overlay matches the frame dimensions exactly), then forward the
 * frame.  Frames are released unposted when no destination is wired. */
static void y420p_handler(Instance *pi, void *msg)
{
  Y4MOverlay_private * priv = (Y4MOverlay_private *)pi;
  YUV420P_buffer * buf = msg;

  if (pi->outputs[OUTPUT_YUV420P].destination) {
    /* Compose overlay, pass along. */
    if (priv->img
        && priv->img->width == buf->width
        && priv->img->height == buf->height) {
      int y, x;

      /* Overlay source planes... */
      uint8_t * py = priv->img->y;
      uint8_t * pcb = priv->img->cb;
      uint8_t * pcr = priv->img->cr;

      /* ...and destination frame planes. */
      uint8_t * by = buf->y;
      uint8_t * bcb = buf->cb;
      uint8_t * bcr = buf->cr;

      for (y=0; y < buf->height; y++) {
        for (x=0; x < buf->width; x++) {
          /* Luma-key test disabled: every pixel is copied. */
          if ( 1/* *py != 0x10 */ ) {
            *by = *py;
            *bcb = *pcb;
            *bcr = *pcr;
          }
          py++;
          by++;
          /* NOTE(review): chroma pointers advance once per 4 luma
             columns and are never re-stepped per row; for 4:2:0 data
             (half-width, half-height chroma planes) one would expect
             an x%2 step plus per-row handling.  Confirm this stepping
             is intentional before changing it. */
          if (x%4 == 0) {
            pcb++;
            pcr++;
            bcb++;
            bcr++;
          }
        }
      }
    }
    /* Ownership of buf transfers to the destination. */
    PostData(buf, pi->outputs[OUTPUT_YUV420P].destination);
  }
  else {
    YUV420P_buffer_release(buf);
  }
}
Ejemplo n.º 8
0
/* Post one clone of the cached rgb3 frame to OUTPUT_RGB3.
 * Returns 0 on success, 1 when source data or destination is missing. */
static int generate_one_frame(Instance *pi)
{
  RGB3Source_private *priv = (RGB3Source_private *)pi;

  /* Need both source data and somewhere to send it. */
  if (!priv->rgb3) {
    fprintf(stderr, "RGB3Source has no rgb3 data!\n");
    return 1;
  }
  if (!pi->outputs[OUTPUT_RGB3].destination) {
    fprintf(stderr, "RGB3Source has no destination!\n");
    return 1;
  }

  /* Post a copy so the cached frame can be reused next time. */
  RGB3_buffer *frame = RGB3_buffer_clone(priv->rgb3);
  PostData(frame, pi->outputs[OUTPUT_RGB3].destination);

  return 0;
}
Ejemplo n.º 9
0
/* Play one Wav buffer through ALSA.  On the first buffer the device
 * is configured (rate, channels, sample format, period size) from the
 * buffer's parameters.  Posts a Feedback_buffer with the input's
 * sequence number when OUTPUT_FEEDBACK is wired.
 *
 * BUGFIX: the buffer is now released on the "disabled" early return;
 * previously it was leaked there, while every other path released or
 * transferred it. */
static void Wav_handler(Instance *pi, void *data)
{
  ALSAio_private *priv = (ALSAio_private *)pi;
  Wav_buffer *wav_in = data;
  int state;
  int dir = 0;
  int rc;
  snd_pcm_sframes_t n;

  if (!priv->c.enable) {
    /* This handler owns the message; release it even when disabled. */
    Wav_buffer_release(&wav_in);
    return;
  }

  if (!priv->c.rate) {
    /* First buffer: configure the device from the Wav parameters. */
    char channels[32];
    char rate[32];
    sprintf(rate, "%d", wav_in->params.rate);
    set_rate(pi, rate);

    /* Set channels. */
    sprintf(channels, "%d", wav_in->params.channels);
    set_channels(pi, channels);

    /* Set format */
    snd_pcm_format_t format = ALSAio_bps_to_snd_fmt(wav_in->params.bits_per_sample);
    if (format != SND_PCM_FORMAT_UNKNOWN) {
      priv->c.format = format;
      rc = snd_pcm_hw_params_set_format(priv->c.handle, priv->c.hwparams, priv->c.format);
      if (rc < 0) {
        fprintf(stderr, "*** %s: snd_pcm_hw_params_set_format %s: %s\n", __func__,
                s(priv->c.device), snd_strerror(rc));
      }
    }
  }

  state = snd_pcm_state(priv->c.handle);
  dpf("%s: state(1)=%s\n", __func__, ALSAio_state_to_string(state));

  if (state == SND_PCM_STATE_OPEN || state == SND_PCM_STATE_SETUP) {
    /* One time playback setup. */

    /* FIXME: Why does 64 work on NVidia CK804 with "snd_intel8x0"
       driver, but not 32, 128, 2048?  How to find out what will
       work? */
    snd_pcm_uframes_t frames = priv->c.frames_per_io;

    fprintf(stderr, "%s: state=%s\n", __func__, ALSAio_state_to_string(state));

    rc = snd_pcm_hw_params_set_period_size_near(priv->c.handle, priv->c.hwparams, &frames, &dir);
    fprintf(stderr, "set_period_size_near returns %d (frames=%d)\n", rc, (int)frames);

    int periods = 4;

    rc = snd_pcm_hw_params_set_periods(priv->c.handle, priv->c.hwparams, periods, 0);
    if (rc < 0) {
      fprintf(stderr, "*** snd_pcm_hw_params_set_periods %s: %s\n", s(priv->c.device), snd_strerror(rc));
    }

    rc = snd_pcm_hw_params(priv->c.handle, priv->c.hwparams);
    if (rc < 0) {
      fprintf(stderr, "*** snd_pcm_hw_params %s: %s\n", s(priv->c.device), snd_strerror(rc));
    }

    state = snd_pcm_state(priv->c.handle);
    fprintf(stderr, "%s: state=%s\n", __func__, ALSAio_state_to_string(state));

    rc = snd_pcm_prepare(priv->c.handle);
    state = snd_pcm_state(priv->c.handle);
    fprintf(stderr, "%s: state=%s\n", __func__, ALSAio_state_to_string(state));
  }

  dpf("%s: state(2)=%s\n", __func__, ALSAio_state_to_string(state));

  /* Write all frames; snd_pcm_writei() may accept fewer frames than
     requested, so loop until it stops making progress. */
  int out_frames = wav_in->data_length / (priv->c.channels * priv->c.format_bytes);
  int frames_written = 0;
  while (1) {
    n = snd_pcm_writei(priv->c.handle, (uint8_t*)wav_in->data + (frames_written * (priv->c.channels * priv->c.format_bytes)),
                       out_frames);
    if (n > 0) {
      out_frames -= n;
      frames_written += n;
    }
    else {
      break;
    }
  }

  if (n < 0) {
    /* Write error (e.g. underrun): attempt recovery via prepare. */
    fprintf(stderr, "*** snd_pcm_writei %s: %s\n", s(priv->c.device), snd_strerror((int)n));
    fprintf(stderr, "*** attempting snd_pcm_prepare() to correct...\n");
    snd_pcm_prepare(priv->c.handle);
  }

  if (wav_in->no_feedback == 0  /* The default is 0, set to 1 if "filler" code below is called. */
      && pi->outputs[OUTPUT_FEEDBACK].destination) {
    Feedback_buffer *fb = Feedback_buffer_new();
    fb->seq = wav_in->seq;
    PostData(fb, pi->outputs[OUTPUT_FEEDBACK].destination);
  }

  Wav_buffer_release(&wav_in);
}
// Forward "delayed" I/O vectors through the regular posting path; the
// delayed variant currently adds no behavior of its own.
CLStatus CLDataPosterByNamedPipe::PostDelayedData(CLIOVectors *pIOVectors)
{
	return PostData(pIOVectors);
}
Ejemplo n.º 11
0
// Connection to the Login server established: log it and post the
// corresponding command for the dispatcher (no payload).
void ClientSocketCL::OnConnected() {
    ::printf("ClientSocket to Login Connected\n");
    PostData(COMMAND_LC_CONNECTED, nullptr, 0);
}
Ejemplo n.º 12
0
// Connection attempt to the Gateway failed: log it and post the
// corresponding command for the dispatcher (no payload).
void ClientSocketCG::OnConnectFailed() {
    ::printf("ClientSocket to Gateway Connect Failed\n");
    PostData(COMMAND_GC_CONNECTFAILED, nullptr, 0);
}
Ejemplo n.º 13
0
// Established Gateway connection dropped: log it and post the
// corresponding command for the dispatcher (no payload).
void ClientSocketCG::OnDisconnected() {
    ::printf("ClientSocket to Gateway Disconnected\n");
    PostData(COMMAND_GC_DISCONNECTED, nullptr, 0);
}
Ejemplo n.º 14
0
/* Compress one frame into a new Jpeg_buffer and post it to
 * OUTPUT_JPEG (or discard it when no destination is wired).
 *
 * compress_mode selects the input layout: COMPRESS_Y422 / COMPRESS_Y420
 * feed libjpeg raw downsampled planes (c1=Y, c2=Cb, c3=Cr); any other
 * mode treats c1 as packed RGB and ignores c2/c3.
 *
 * Also adapts priv->adjusted_quality: quality drops by 5 when a frame
 * takes longer than priv->time_limit, and recovers by 2 otherwise. */
static void compress_and_post(Instance *pi,
                              int width, int height,
                              uint8_t *c1, uint8_t *c2, uint8_t *c3,
                              Image_common *c,
                              int compress_mode)
{
  /* Compress input buffer.  See "libjpeg.txt" in IJPEG source, and "cjpeg.c". */
  CJpeg_private *priv = (CJpeg_private *)pi;
  struct jpeg_compress_struct cinfo;
  struct jpeg_error_mgr jerr;
  Jpeg_buffer *jpeg_out = 0L;
  double t1, t2;
  int report_time = 0;

  if (0) printf("%s:%s(width=%d height=%d c1=%p c2=%p c3=%p)\n",
         __FILE__, __func__,
         width, height,
         c1, c2, c3);

  cti_getdoubletime(&t1);

  cinfo.err = jpeg_std_error(&jerr); /* NOTE: See ERREXIT, error_exit,
                                        this may cause the program to call exit()! */
  jpeg_create_compress(&cinfo);

  jpeg_out = Jpeg_buffer_new(0, 0L); /* Pass 0 to let libjpeg allocate output buffer */
  jpeg_out->width = width;
  jpeg_out->height = height;

  /* NOTE(review): the output timestamp is set only when the caller's
     timestamp is 0.0; when c->timestamp is nonzero it is NOT copied
     here -- confirm whether upstream fills jpeg_out->c some other way. */
  if (c->timestamp == 0.0) {
    jpeg_out->c.timestamp = t1; /* Save timestamp. */
  }

  if (compress_mode == COMPRESS_Y422 || compress_mode == COMPRESS_Y420) {
    /* Raw-data mode requires dimensions that are multiples of 8;
       truncate (with a warning) rather than fail. */
    int w2 = (width/8)*8;
    int h2 = (height/8)*8;

    if (w2 != width) {
      fprintf(stderr, "warning: truncating width from %d to %d\n", width, w2);
      jpeg_out->width = w2;
    }
    if (h2 != height) {
      fprintf(stderr, "warning: truncating height from %d to %d\n", height, h2);
      jpeg_out->height = h2;
    }
    // jpeg_out->tv = y422p_in->tv;
  }

  jpeg_mem_dest(&cinfo, &jpeg_out->data, &jpeg_out->encoded_length);

  /* NOTE: It turns out there is actually no need for jinit_read_mem()
     [rdmem.c], just set the pointers in the encode loop! */

  cinfo.image_width = jpeg_out->width;
  cinfo.image_height = jpeg_out->height;
  cinfo.input_components = 3;
  cinfo.in_color_space = JCS_RGB; /* reset below if y422p or y420p*/

  jpeg_set_defaults(&cinfo);

  /* See "Raw (downsampled) image data" section in libjpeg.txt. */
  if (compress_mode == COMPRESS_Y422) {
    /* 4:2:2 sampling: chroma halved horizontally only. */
    cinfo.raw_data_in = TRUE;
    jpeg_set_colorspace(&cinfo, JCS_YCbCr);

    cinfo.do_fancy_downsampling = FALSE;  // http://www.lavrsen.dk/svn/motion/trunk/picture.c

    cinfo.comp_info[0].h_samp_factor = 2;
    cinfo.comp_info[0].v_samp_factor = 1;

    cinfo.comp_info[1].h_samp_factor = 1;
    cinfo.comp_info[1].v_samp_factor = 1;

    cinfo.comp_info[2].h_samp_factor = 1;
    cinfo.comp_info[2].v_samp_factor = 1;
  }
  else if (compress_mode == COMPRESS_Y420) {
    /* 4:2:0 sampling: chroma halved in both directions. */
    cinfo.raw_data_in = TRUE;
    jpeg_set_colorspace(&cinfo, JCS_YCbCr);

    cinfo.do_fancy_downsampling = FALSE;  // http://www.lavrsen.dk/svn/motion/trunk/picture.c

    cinfo.comp_info[0].h_samp_factor = 2;
    cinfo.comp_info[0].v_samp_factor = 2;

    cinfo.comp_info[1].h_samp_factor = 1;
    cinfo.comp_info[1].v_samp_factor = 1;

    cinfo.comp_info[2].h_samp_factor = 1;
    cinfo.comp_info[2].v_samp_factor = 1;
  }

  /* Various options can be set here... */
  //cinfo.dct_method = JDCT_FLOAT;
  cinfo.dct_method = priv->dct_method; /* Ah, we have to set this up here! */

  jpeg_set_quality (&cinfo, priv->adjusted_quality, TRUE);

  jpeg_start_compress(&cinfo, TRUE);

  while (cinfo.next_scanline < cinfo.image_height) {
    if (compress_mode == COMPRESS_Y422) {
      int n;
      /* Setup necessary for raw downsampled data.  */
      /* NOTE(review): each iteration builds 16 row pointers past
         next_scanline; for heights that are not a multiple of 16 the
         trailing pointers reference rows beyond the plane -- confirm
         callers only pass suitably padded/sized planes. */
      JSAMPROW y[16];
      JSAMPROW cb[16];
      JSAMPROW cr[16];
      for (n=0; n < 16; n++) {
        y[n] = c1 + ((cinfo.next_scanline+n)* width);
        cb[n] = c2 + ((cinfo.next_scanline+n) * width / 2);
        cr[n] = c3 + ((cinfo.next_scanline+n) * width / 2);
      }

      JSAMPARRAY array[3] = { y, cb, cr};
      JSAMPIMAGE image = array;
      /* Need to pass enough lines at a time, see "(num_lines < lines_per_iMCU_row)" test in
         jcapistd.c */
      jpeg_write_raw_data(&cinfo, image, 16);
    }
    else if (compress_mode == COMPRESS_Y420) {
      int n;
      /* Setup necessary for raw downsampled data.  */
      // fprintf(stderr, "420 cinfo.next_scanline=%d\n", cinfo.next_scanline);
      JSAMPROW y[16];
      JSAMPROW cb[16];
      JSAMPROW cr[16];
      for (n=0; n < 16; n++) {
        y[n] = c1 + ((cinfo.next_scanline+n)* width);
        cb[n] = c2 + (((cinfo.next_scanline/2)+n) * width / 2);
        cr[n] = c3 + (((cinfo.next_scanline/2)+n) * width / 2);
      }

      JSAMPARRAY array[3] = { y, cb, cr};
      JSAMPIMAGE image = array;
      /* Need to pass enough lines at a time, see "(num_lines < lines_per_iMCU_row)" test in
         jcapistd.c */
      jpeg_write_raw_data(&cinfo, image, 16);
    }
    else {
      /* Packed RGB: one scanline at a time. */
      JSAMPROW row_pointer[1]; /* pointer to a single row */
      row_pointer[0] = c1 + (cinfo.next_scanline *  width * 3);
      jpeg_write_scanlines(&cinfo, row_pointer, 1);
    }
  }

  jpeg_finish_compress(&cinfo);

  jpeg_destroy_compress(&cinfo);

  // fprintf(stderr, "jpeg_out->encoded_length=%lu\n", jpeg_out->encoded_length);

  if (pi->outputs[OUTPUT_JPEG].destination) {
    PostData(jpeg_out, pi->outputs[OUTPUT_JPEG].destination);
  }
  else {
    /* Discard output buffer! */
    Jpeg_buffer_release(jpeg_out);
  }

  jpeg_out = 0L;                /* Clear output buffer copy. */

  /* Calculate compress time. */
  cti_getdoubletime(&t2);
  double tdiff =  (t2 - t1);

  if (pi->counter % (30) == 0) {
    dpf("tdiff=%.5f\n", tdiff);
  }


  if ((priv->time_limit > 0.0) && (tdiff > priv->time_limit)) {
    /* Compress time went over time limit, call sched_yield(), which
       should prevent starving other threads, most importantly video
       and audio capture.  Frames will back up on this thread, but on
       systems like 1.6GHz P4 which can just barely handle
       640x480@30fps, it tends to even out. */
    sched_yield();

    /* Turn down quality. */
    if (priv->adjusted_quality > 50) {
      priv->adjusted_quality -= 5;
    }

    report_time = 1;
  }
  else if (priv->adjusted_quality < priv->quality) {
    /* Ratchet quality back up, but only by 2, we don't "ping-pong" +/- 5. */
    int temp = priv->adjusted_quality + 2;
    priv->adjusted_quality = cti_min(temp, priv->quality);
    report_time = 1;
  }

  if (report_time) {
    dpf("* %.5f (q=%d)\n",
        tdiff,
        priv->adjusted_quality);
  }
}
Ejemplo n.º 15
0
/* Estimate motion by comparing an incoming grayscale frame against an
 * IIR-filtered accumulator, sampling every 10th pixel, optionally
 * restricted by a same-size mask.  Posts the raw and smoothed sums. */
static void gray_handler(Instance *pi, void *msg)
{
  MotionDetect_private *priv = (MotionDetect_private *)pi;
  Gray_buffer *gray = msg;
  int row, col;
  int motion = 0;
  int use_mask;

  dpf("%s:%s()\n", __FILE__, __func__);

  /* Drop the accumulator if the frame geometry changed. */
  if (priv->accum &&
      (priv->accum->width != gray->width ||
       priv->accum->height != gray->height)) {
    Gray_buffer_release(priv->accum);
    priv->accum = 0L;
  }

  /* (Re)seed the accumulator from the current frame. */
  if (!priv->accum) {
    priv->accum = Gray_buffer_new(gray->width, gray->height, 0L);
    memcpy(priv->accum->data, gray->data, gray->data_length);
  }

  /* The mask only applies when it matches the frame dimensions. */
  use_mask = (priv->mask &&
              priv->mask->height == gray->height &&
              priv->mask->width == gray->width);

  /* Sample every 10th pixel in both directions. */
  for (row = 0; row < gray->height; row += 10) {
    for (col = 0; col < gray->width; col += 10) {
      int offset = row * gray->width + col;
      int d;

      if (use_mask && priv->mask->data[offset] == 0) {
        continue;
      }

      /* The abs() is redundant with the squaring below, but kept in
         case the squaring is ever removed or changed. */
      d = abs(priv->accum->data[offset] - gray->data[offset]);
      motion += d * d;

      /* Store with minimal IIR filtering. */
      priv->accum->data[offset] = (priv->accum->data[offset] * 2 + gray->data[offset]) / 3;
    }
  }

  /* Running sum with more substantial IIR filtering. */
  priv->running_sum = (priv->running_sum * 15 + motion) / (15 + 1);

  if (pi->outputs[OUTPUT_CONFIG].destination) {
    char temp[64];
    snprintf(temp, 64, "%d %d", motion, priv->running_sum);
    PostData(Config_buffer_new("text", temp), pi->outputs[OUTPUT_CONFIG].destination);
  }

  if (pi->outputs[OUTPUT_MOTIONDETECT].destination) {
    MotionDetect_result *md = MotionDetect_result_new();
    md->sum = motion;
    PostData(md, pi->outputs[OUTPUT_MOTIONDETECT].destination);
  }

  Gray_buffer_release(gray);
}
Ejemplo n.º 16
0
/* One capture-loop iteration: service pending messages, then read one
 * period of PCM samples and post them as Audio and/or Wav buffers with
 * a smoothed running timestamp.
 *
 * BUGFIX: the Wav sequence number is now assigned BEFORE PostData();
 * previously `wav->seq = x++` ran after posting, by which time the
 * consumer thread may already be using (or have freed) the buffer. */
static void ALSACapture_tick(Instance *pi)
{
  ALSAio_private *priv = (ALSAio_private *)pi;
  int wait_flag;

  /* Block on the message queue only when capture cannot proceed. */
  if (!priv->c.enable || !priv->c.handle) {
    wait_flag = 1;
  }
  else {
    wait_flag = 0;
  }

  Handler_message *hm;

  hm = GetData(pi, wait_flag);
  if (hm) {
    hm->handler(pi, hm->data);
    ReleaseMessage(&hm,pi);
  }

  if (!priv->c.enable || !priv->c.handle) {
    /* Not enabled or no handle, don't try to capture anything. */
    return;
  }

  /* Read a block of data. */
  int rc;
  snd_pcm_sframes_t n;
  int state;
  snd_pcm_uframes_t frames = priv->c.frames_per_io;
  int dir = 0;

  state = snd_pcm_state(priv->c.handle);

  dpf("%s: state(1)=%s\n", __func__, ALSAio_state_to_string(state));

  if (state == SND_PCM_STATE_OPEN || state == SND_PCM_STATE_SETUP) {
    /* One time capture setup. */
    fprintf(stderr, "%s: state=%s\n", __func__, ALSAio_state_to_string(state));

    rc = snd_pcm_hw_params_set_period_size_near(priv->c.handle, priv->c.hwparams, &frames, &dir);
    fprintf(stderr, "%s: set_period_size_near returns %d (frames %d:%d)\n",
            __func__, rc, (int)priv->c.frames_per_io, (int)frames);

    rc = snd_pcm_hw_params(priv->c.handle, priv->c.hwparams);
    if (rc < 0) {
      fprintf(stderr, "*** snd_pcm_hw_params %s: %s\n", s(priv->c.device), snd_strerror(rc));
    }

    state = snd_pcm_state(priv->c.handle);
    fprintf(stderr, "%s: state=%s\n", __func__, ALSAio_state_to_string(state));

    rc = snd_pcm_prepare(priv->c.handle);
    state = snd_pcm_state(priv->c.handle);
    fprintf(stderr, "%s: state=%s\n", __func__, ALSAio_state_to_string(state));

    /* Initialize running_timestamp. */
    cti_getdoubletime(&priv->c.running_timestamp);
  }

  snd_pcm_hw_params_get_period_size(priv->c.hwparams, &frames, &dir);
  int size = frames * priv->c.format_bytes * priv->c.channels;

  if (!size) {
    // This can happen if channels or format_bytes was not set...
    fprintf(stderr, "%s: size error - %ld * %d * %d\n", __func__, frames , priv->c.format_bytes , priv->c.channels);
    while (1) {
      sleep(1);
    }
  }

  if (!priv->c.rate) {
    fprintf(stderr, "%s: error - rate is 0!\n", __func__);
    while (1) {
      sleep(1);
    }
  }

  /* Allocate buffer for PCM samples, with a guard byte to detect
     overwrites by snd_pcm_readi(). */
  uint8_t *buffer = Mem_malloc(size+1);
  buffer[size] = 0x55;

  /* Read the data. */
  n = snd_pcm_readi(priv->c.handle, buffer, frames);

  if (buffer[size] != 0x55)  {
    fprintf(stderr, "*** overwrote audio buffer!\n");
  }

  if (n != frames) {
    /* Short read or error (e.g. overrun): try to recover and bail out. */
    fprintf(stderr, "*** snd_pcm_readi %s: %s\n", s(priv->c.device), snd_strerror((int)n));
    fprintf(stderr, "*** attempting snd_pcm_prepare() to correct...\n");
    snd_pcm_prepare(priv->c.handle);
    goto out;
  }

  /* Advance the running timestamp by the nominal duration of this
     period. */
  double calculated_period = frames*1.0/(priv->c.rate);

  priv->c.running_timestamp += calculated_period;

  double tnow;
  cti_getdoubletime(&tnow);

  /* Do coarse adjustment if necessary, this can happen after a
     system date change via ntp or htpdate. */
  if (fabs(priv->c.running_timestamp - tnow) > 5.0) {
    fprintf(stderr, "coarse timestamp adjustment, %.3f -> %.3f\n",
            priv->c.running_timestamp, tnow);
    priv->c.running_timestamp = tnow;
  }

  /* Adjust running timestamp if it slips too far either way.  Smoothing, I guess. */
  if (priv->c.running_timestamp - tnow > calculated_period) {
    dpf("priv->c.rate=%d,  %.3f - %.3f (%.5f) > %.5f : - running timestamp\n",
        priv->c.rate,
        priv->c.running_timestamp, tnow,
        (tnow - priv->c.running_timestamp),
        calculated_period);
    priv->c.running_timestamp -= (calculated_period/2.0);
  }
  else if (tnow - priv->c.running_timestamp > calculated_period) {
    dpf("priv->c.rate=%d, %.3f - %.3f (%.5f) > %.5f : + running timestamp\n",
        priv->c.rate,
        tnow, priv->c.running_timestamp,
        (tnow - priv->c.running_timestamp),
        calculated_period);
    priv->c.running_timestamp += (calculated_period/2.0);
  }

  int buffer_bytes = n * priv->c.format_bytes * priv->c.channels;

  if (pi->outputs[OUTPUT_AUDIO].destination) {
    /* Audio path gets its own copy of the samples. */
    Audio_buffer * audio = Audio_buffer_new(priv->c.rate,
                                            priv->c.channels, priv->c.atype);
    Audio_buffer_add_samples(audio, buffer, buffer_bytes);
    audio->timestamp = priv->c.running_timestamp;
    PostData(audio, pi->outputs[OUTPUT_AUDIO].destination);
  }

  if (pi->outputs[OUTPUT_WAV].destination) {
    Wav_buffer *wav = Wav_buffer_new(priv->c.rate, priv->c.channels, priv->c.format_bytes);
    wav->timestamp = priv->c.running_timestamp;

    dpf("%s allocated wav @ %p\n", __func__, wav);
    wav->data = buffer;  buffer = 0L;   /* Assign, do not free below. */
    wav->data_length = buffer_bytes;
    Wav_buffer_finalize(wav);

    if (priv->c.analyze) {
      analyze_rate(priv, wav);
    }

    /* BUGFIX: set the sequence number before posting; once posted the
       buffer belongs to the destination thread. */
    static int seq = 0;
    wav->seq = seq++;
    PostData(wav, pi->outputs[OUTPUT_WAV].destination);
    wav = 0L;
  }

 out:
  if (buffer) {
    Mem_free(buffer);
  }
}
Ejemplo n.º 17
0
  /** 
   * Create output objects: the owning "triggerSums" list plus the
   * cached (eta,phi) histogram, the multiplicity distribution, the
   * trigger counter, and the ESD/MC vertex distributions.  Each
   * trigger type then registers its own objects, and the event
   * inspector is wired up.  Finally the list is posted to slot 1.
   */
  void UserCreateOutputObjects()
  {
    fList = new TList;
    fList->SetOwner();
    fList->SetName("triggerSums");

    // Double_t mb[] = { 0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11 };
    // Int_t    nM   = 10;
    // Binning templates for the per-event (eta,phi) cache.
    TAxis eAxis(200, -4, 6);
    TAxis pAxis(40, 0, 2*TMath::Pi());

    // Per-event particle cache; reset and refilled in UserExec.
    // Deliberately NOT added to fList (it is transient).
    fData = new TH2D("data", "Cache", 
		     eAxis.GetNbins(), eAxis.GetXmin(), eAxis.GetXmax(), 
		     pAxis.GetNbins(), pAxis.GetXmin(), pAxis.GetXmax());
    fData->SetDirectory(0);
    fData->SetXTitle("#eta");
    fData->SetYTitle("#varphi [radians]");
    fData->SetZTitle("N_{ch}(#eta,#varphi)");
    fData->Sumw2();
    
    // Charged-particle multiplicity in |eta|<1, one bin per count up
    // to kMaxN (overflow-style "kMaxN+" last bin, "all" first bin).
    fM = new TH1D("m", "Distribution of N_{ch}|_{|#eta|<1}", kMaxN+1,0,kMaxN+1);
    fM->SetXTitle("N_{ch}|_{|#eta|<1}");
    fM->SetYTitle("Events");
    fM->SetFillColor(kRed+1);
    fM->SetFillStyle(3001);
    fM->SetDirectory(0);
    fList->Add(fM);

    // Label the multiplicity bins: "all", "<1", "<2", ..., "N+".
    for (Int_t i = 0; i <= kMaxN; i++) { 
      TString lbl;
      if (i == 0)          lbl = "all";
      else if (i == kMaxN) lbl = Form("%d+",i-1);
      else                 lbl = Form("<%d",i);
      fM->GetXaxis()->SetBinLabel(i+1, lbl);
    }

    // Counter of fired triggers; bins filled in UserExec (0..7).
    fTriggers = new TH1I("triggers", "Triggers", 8, -.5, 7.5);
    fTriggers->SetDirectory(0);
    fTriggers->GetXaxis()->SetBinLabel(1, "INEL (MC)");
    fTriggers->GetXaxis()->SetBinLabel(2, "INEL (ESD)");
    fTriggers->GetXaxis()->SetBinLabel(3, "INEL & N_{cluster}>0 (MC)");
    fTriggers->GetXaxis()->SetBinLabel(4, "INEL & N_{cluster}>0 (ESD)");
    fTriggers->GetXaxis()->SetBinLabel(5, "INEL>0 (MC)");
    fTriggers->GetXaxis()->SetBinLabel(6, "INEL>0 (ESD)");
    fTriggers->GetXaxis()->SetBinLabel(7, "NSD (MC)");
    fTriggers->GetXaxis()->SetBinLabel(8, "NSD (ESD)");
    fTriggers->SetFillColor(kYellow+1);
    fTriggers->SetFillStyle(3001);
    fList->Add(fTriggers);

    // ESD-reconstructed interaction vertex distribution.
    fVertexESD = new TH1D("vertexESD", "ESD vertex distribution", 
			  fVertexAxis.GetNbins(), 
			  fVertexAxis.GetXmin(), 
			  fVertexAxis.GetXmax());
    fVertexESD->SetDirectory(0);
    fVertexESD->SetFillColor(kRed+1);
    fVertexESD->SetFillStyle(3001);
    fVertexESD->SetXTitle("v_{z} [cm]");
    fVertexESD->SetYTitle("P(v_{z})");
    fList->Add(fVertexESD);

    // Generated (MC truth) interaction vertex distribution.
    fVertexMC = new TH1D("vertexMC", "MC vertex distribution", 
			  fVertexAxis.GetNbins(), 
			  fVertexAxis.GetXmin(), 
			  fVertexAxis.GetXmax());
    fVertexMC->SetDirectory(0);
    fVertexMC->SetFillColor(kBlue+1);
    fVertexMC->SetFillStyle(3001);
    fVertexMC->SetXTitle("v_{z} [cm]");
    fVertexMC->SetYTitle("P(v_{z})");
    fList->Add(fVertexMC);

    // Per-trigger-type bookkeeping objects.
    fInel.CreateObjects(fList, fM, fData);
    fInelGt0.CreateObjects(fList, fM, fData);
    fNSD.CreateObjects(fList, fM, fData);
    fNClusterGt0.CreateObjects(fList, fM, fData);


    fInspector.DefineOutput(fList);
    fInspector.Init(fVertexAxis);

    PostData(1, fList);
  }
Ejemplo n.º 18
0
  /** 
   * Per-event processing: run the event inspector over the ESD and MC
   * events, fill the vertex and multiplicity histograms, and record
   * trigger efficiency information for each trigger type.
   */
  void UserExec(Option_t*) 
  {
    // Get the input data - MC event
    AliMCEvent*  mcEvent = MCEvent();
    if (!mcEvent) { 
      AliWarning("No MC event found");
      return;
    }
    
    // Get the input data - ESD event
    AliESDEvent* esd = dynamic_cast<AliESDEvent*>(InputEvent());
    if (!esd) { 
      AliWarning("No ESD event found for input event");
      return;
    }

    // One-time initialisation of the inspector from run-level ESD info.
    if (fFirstEvent && esd->GetESDRun()) {
      fInspector.ReadRunDetails(esd);

      AliInfo(Form("Initializing with parameters from the ESD:\n"
		   "         AliESDEvent::GetBeamEnergy()   ->%f\n"
		   "         AliESDEvent::GetBeamType()     ->%s\n"
		   "         AliESDEvent::GetCurrentL3()    ->%f\n"
		   "         AliESDEvent::GetMagneticField()->%f\n"
		   "         AliESDEvent::GetRunNumber()    ->%d\n",
		   esd->GetBeamEnergy(),
		   esd->GetBeamType(),
		   esd->GetCurrentL3(),
		   esd->GetMagneticField(),
		   esd->GetRunNumber()));
      
      fFirstEvent = false;
    }

    // Get the particle stack 
    AliStack* stack = mcEvent->Stack();

    // Some variables -- all filled (as out-parameters) by
    // Process()/ProcessMC() below; do not read them before those calls.
    UInt_t   triggers; // Trigger bits
    Bool_t   lowFlux;  // Low flux flag
    UShort_t iVz;      // Vertex bin from ESD
    Double_t vZ;       // Z coordinate from ESD
    Double_t cent;     // Centrality 
    UShort_t iVzMc;    // Vertex bin from MC
    Double_t vZMc;     // Z coordinate of IP vertex from MC
    Double_t b;        // Impact parameter
    Int_t    nPart;    // Number of participants 
    Int_t    nBin;     // Number of binary collisions 
    Double_t phiR;     // Reaction plane from MC
    UShort_t nClusters;// Number of clisters 
    // Process the data 
    Int_t retESD = fInspector.Process(esd, triggers, lowFlux, iVz, vZ, cent,
				      nClusters);
    Int_t retMC  = fInspector.ProcessMC(mcEvent, triggers, iVzMc, 
					vZMc, b, nPart, nBin, phiR);

    // A return code of kOk means a usable vertex was found.
    Bool_t hasESDVtx = retESD == AliFMDEventInspector::kOk;
    Bool_t hasMCVtx  = retMC  == AliFMDEventInspector::kOk;
    if (hasESDVtx) fVertexESD->Fill(vZ);
    if (hasMCVtx)  fVertexMC->Fill(vZMc);

    // Every MC event is treated as INEL (the kB check is disabled).
    Bool_t isMcInel = true; // (triggers & AliAODForwardMult::kB);
    Bool_t isMcNSD  = (triggers & AliAODForwardMult::kMCNSD);

    // Count SPD tracklets in |eta|<1 as the ESD multiplicity estimate.
    Int_t mESD = 0;
    const AliMultiplicity* spdmult = esd->GetMultiplicity();
    if (!spdmult) {
      AliWarning("No SPD multiplicity");
    }
    else { 
      // Check if we have one or more tracklets 
      // in the range -1 < eta < 1 to set the INEL>0 
      // trigger flag. 
      Int_t n = spdmult->GetNumberOfTracklets();
      for (Int_t j = 0; j < n; j++) 
	if(TMath::Abs(spdmult->GetEta(j)) < 1) mESD++;
    }

    // Reset cache 
    fData->Reset();
    Int_t mMC = 0; // Number of particles in |eta|<1

    // Loop over all tracks 
    Int_t nTracks = mcEvent->GetNumberOfTracks();
    for (Int_t iTr = 0; iTr < nTracks; iTr++) { 
      AliMCParticle* particle = 
	static_cast<AliMCParticle*>(mcEvent->GetTrack(iTr));
    
      // Check the returned particle 
      if (!particle) continue;

      // Check if this charged and a primary 
      Bool_t isCharged = particle->Charge() != 0;
      Bool_t isPrimary = stack->IsPhysicalPrimary(iTr);

      if (!isCharged || !isPrimary) continue;

      
      // Fill (eta,phi) of the particle into histograsm for b
      Double_t eta = particle->Eta();
      Double_t phi = particle->Phi();
      
      fData->Fill(eta, phi);
      if (TMath::Abs(eta) <= 1) mMC++;
    }
    // Choose the multiplicity estimate per the configured requirement.
    Int_t m = mESD;
    if (fTrackletRequirement == kMC) m = mMC;
    fM->Fill(m);

    bool isMcInelGt0 = isMcInel && (mMC > 0);
    
    // The event "has a vertex" only if all configured requirements hold.
    bool hasVertex   = true;
    if (fVertexRequirement & kMC)  hasVertex = hasVertex && hasMCVtx;
    if (fVertexRequirement & kESD) hasVertex = hasVertex && hasESDVtx;

    // For each trigger type: count the MC-truth condition (even bins)
    // and the reconstructed trigger (odd bins), then hand the event to
    // the type's own bookkeeping.
    if (isMcInel) {
      fTriggers->Fill(0);
      bool triggered = (triggers & AliAODForwardMult::kInel);
      if (triggered) fTriggers->Fill(1);
      fInel.AddEvent(triggered, hasVertex, m, fData);
    }
    if (isMcInel) { // && nClusters > 0) {
      fTriggers->Fill(2);
      bool triggered = (triggers & AliAODForwardMult::kNClusterGt0);
      if (triggered) fTriggers->Fill(3);
      fNClusterGt0.AddEvent(triggered, hasVertex, m, fData);
    }
    if (isMcInelGt0) {
      fTriggers->Fill(4);
      bool triggered = (triggers & AliAODForwardMult::kInelGt0);
      if (triggered) fTriggers->Fill(5);
      fInelGt0.AddEvent(triggered, hasVertex, m, fData);
    }
    if (isMcNSD) {
      fTriggers->Fill(6);
      bool triggered = (triggers & AliAODForwardMult::kNSD);
      if (triggered) fTriggers->Fill(7);
      fNSD.AddEvent(triggered, hasVertex, m, fData);
    }
    PostData(1, fList);
  }
Ejemplo n.º 19
0
/*
 * Y422p_handler: apply the enabled VFilter operations, in fixed order, to an
 * incoming YUV 4:2:2 planar frame and forward the result.
 *
 * pi:  this Instance (its private data is a VFilter_private)
 * msg: a YUV422P_buffer, ownership transferred to this handler
 *
 * Each stage takes the previous stage's output (or the input frame for the
 * first active stage), allocates a new buffer, and releases its source.  If
 * no stage ran, the input frame is passed through unchanged.  The final
 * buffer is posted downstream, or released if no destination is connected.
 */
static void Y422p_handler(Instance *pi, void *msg)
{
  VFilter_private *priv = (VFilter_private *)pi;
  YUV422P_buffer *y422p_in = msg;
  YUV422P_buffer *y422p_out = 0L;
  YUV422P_buffer *y422p_src = 0L;

  /* FIXME: The crop operations could be done by calculations, followed by only a single copy operation. */
  if (priv->top_crop) {
    /* Drop top_crop rows from the top.  In 4:2:2 planar, cb/cr rows are
       width/2 bytes with one chroma row per luma row, so the chroma offset
       and sizes are half the luma values. */
    y422p_src = y422p_out ? y422p_out : y422p_in;
    y422p_out = YUV422P_buffer_new(y422p_src->width, y422p_src->height - priv->top_crop, &y422p_src->c);
    memcpy(y422p_out->y, y422p_src->y+(y422p_src->width * priv->top_crop), y422p_out->width*y422p_out->height);
    memcpy(y422p_out->cb, y422p_src->cb+(y422p_src->width * priv->top_crop)/2, y422p_out->width*y422p_out->height/2);
    memcpy(y422p_out->cr, y422p_src->cr+(y422p_src->width * priv->top_crop)/2, y422p_out->width*y422p_out->height/2);
    YUV422P_buffer_release(y422p_src);
  }

  if (priv->bottom_crop) {
    /* Drop bottom_crop rows: the planes are row-contiguous, so copying the
       first (height - bottom_crop) rows of each plane is enough. */
    y422p_src = y422p_out ? y422p_out : y422p_in;
    y422p_out = YUV422P_buffer_new(y422p_src->width, y422p_src->height - priv->bottom_crop, &y422p_src->c);
    memcpy(y422p_out->y, y422p_src->y, y422p_out->width*y422p_out->height);
    memcpy(y422p_out->cb, y422p_src->cb, y422p_out->width*y422p_out->height/2);
    memcpy(y422p_out->cr, y422p_src->cr, y422p_out->width*y422p_out->height/2);
    YUV422P_buffer_release(y422p_src);
  }

  if (priv->left_right_crop) {
    /* Crop left_right_crop pixels from BOTH sides.  Unlike the top/bottom
       crops, the remaining pixels of each row are not contiguous with the
       next row, so every row must be copied individually. */
    y422p_src = y422p_out ? y422p_out : y422p_in;
    if ((priv->left_right_crop * 2) >= y422p_src->width) {
      /* Guard against a non-positive output width (old check only rejected
         crop > width, which still allowed 2*crop >= width). */
      fprintf(stderr, "left_right_crop value %d is wider than input %d\n",
              priv->left_right_crop, y422p_src->width);
    }
    else {
      int row;
      y422p_out = YUV422P_buffer_new(y422p_src->width - (priv->left_right_crop * 2), y422p_src->height,
                                   &y422p_src->c);
      /* Previous code copied only the first row of each plane; copy all of
         them.  Chroma rows are half-width, hence the /2 factors (assumes
         even widths and an even crop, as elsewhere in this handler). */
      for (row = 0; row < y422p_out->height; row++) {
        memcpy(y422p_out->y + (row * y422p_out->width),
               y422p_src->y + (row * y422p_src->width) + priv->left_right_crop,
               y422p_out->width);
        memcpy(y422p_out->cb + (row * y422p_out->width/2),
               y422p_src->cb + (row * y422p_src->width/2) + (priv->left_right_crop/2),
               y422p_out->width/2);
        memcpy(y422p_out->cr + (row * y422p_out->width/2),
               y422p_src->cr + (row * y422p_src->width/2) + (priv->left_right_crop/2),
               y422p_out->width/2);
      }
      YUV422P_buffer_release(y422p_src);
    }
  }

  if (priv->y3blend) {
    /* Horizontal blend, for smoothing out saturation artifact in Y channel. */
    y422p_src = y422p_out ? y422p_out : y422p_in;
    y422p_out = YUV422P_buffer_new(y422p_src->width, y422p_src->height, &y422p_src->c);
    single_y3blend(y422p_src->y, y422p_out->y, y422p_src->width, y422p_src->height);
    memcpy(y422p_out->cb, y422p_src->cb, y422p_src->width*y422p_src->height/2);
    memcpy(y422p_out->cr, y422p_src->cr, y422p_src->width*y422p_src->height/2);
    YUV422P_buffer_release(y422p_src);
  }

  if (priv->adaptive3point) {
    /* Another way to remove saturation artifacts. */
    y422p_src = y422p_out ? y422p_out : y422p_in;
    y422p_out = YUV422P_buffer_new(y422p_src->width, y422p_src->height, &y422p_src->c);
    adaptive3point_filter(y422p_src, y422p_out);
    YUV422P_buffer_release(y422p_src);
  }

  if (priv->horizontal_filter_enabled) {
    /* Horizontal filter on the Y plane; chroma passes through unchanged. */
    y422p_src = y422p_out ? y422p_out : y422p_in;
    y422p_out = YUV422P_buffer_new(y422p_src->width, y422p_src->height, &y422p_src->c);
    single_horizontal_filter(priv, y422p_src->y, y422p_out->y, y422p_src->width, y422p_src->height);
    memcpy(y422p_out->cb, y422p_src->cb, y422p_src->width*y422p_src->height/2);
    memcpy(y422p_out->cr, y422p_src->cr, y422p_src->width*y422p_src->height/2);
    YUV422P_buffer_release(y422p_src);
  }

  if (priv->linear_blend) {
    /* Vertical blend, for cheap de-interlacing. */
    y422p_src = y422p_out ? y422p_out : y422p_in;
    y422p_out = YUV422P_buffer_new(y422p_src->width, y422p_src->height, &y422p_src->c);
    single_121_linear_blend(y422p_src->y, y422p_out->y, y422p_src->width, y422p_src->height);
    single_121_linear_blend(y422p_src->cb, y422p_out->cb, y422p_src->width/2, y422p_src->height);
    single_121_linear_blend(y422p_src->cr, y422p_out->cr, y422p_src->width/2, y422p_src->height);
    YUV422P_buffer_release(y422p_src);
  }

  if (priv->trim) {
    /* Smooth out low bits to make compression easier. */
    y422p_src = y422p_out ? y422p_out : y422p_in;
    y422p_out = YUV422P_buffer_new(y422p_src->width, y422p_src->height, &y422p_src->c);
    single_trim(priv, y422p_src->y, y422p_out->y, y422p_src->width, y422p_src->height);
    /* Carry the chroma planes over unchanged; previously they were left
       uninitialized in the new buffer. */
    memcpy(y422p_out->cb, y422p_src->cb, y422p_src->width*y422p_src->height/2);
    memcpy(y422p_out->cr, y422p_src->cr, y422p_src->width*y422p_src->height/2);
    YUV422P_buffer_release(y422p_src);
  }

  if (!y422p_out) {
    /* No stage ran: pass the input frame through. */
    y422p_out = y422p_in;
  }

  if (priv->field_split) {
    /* In-place field split; operates on whichever buffer is current. */
    y422p_out->c.interlace_mode = IMAGE_FIELDSPLIT_TOP_FIRST;
    single_field_split(y422p_out->y, y422p_out->width, y422p_out->height);
    single_field_split(y422p_out->cb, y422p_out->width/2, y422p_out->height);
    single_field_split(y422p_out->cr, y422p_out->width/2, y422p_out->height);
  }

  if (pi->outputs[OUTPUT_YUV422P].destination) {
    PostData(y422p_out, pi->outputs[OUTPUT_YUV422P].destination);
  }
  else {
    /* No consumer connected: drop the frame to avoid leaking the buffer. */
    YUV422P_buffer_release(y422p_out);
  }

}