Exemple #1
0
// channel is numbered from 0
// Update the global menu_adc_direction (0 = left/back, 1 = right/forward)
// from the current steering and throttle ADC readings (throttle wins if both
// are deflected), or toggle it with the CH3 button; then, if a channel is
// using forced values, set menu_force_value to full left/right accordingly.
//
// The original body also selected per-channel adc/calib-mid values into two
// locals, but those were never read afterwards (dead code, removed here).
// The "channel" parameter is kept so callers are unaffected.
static void menu_set_adc_direction(u8 channel) {
    (void)channel;  // retained for interface compatibility (see note above)

    // check steering firstly
    if (adc_steering_ovs < ((cg.calib_steering_mid - 40) << ADC_OVS_SHIFT))
	menu_adc_direction = 0;
    else if (adc_steering_ovs > ((cg.calib_steering_mid + 40) << ADC_OVS_SHIFT))
	menu_adc_direction = 1;

    // then check throttle
    if (adc_throttle_ovs < ((cg.calib_throttle_mid - 40) << ADC_OVS_SHIFT))
	menu_adc_direction = 0;
    else if (adc_throttle_ovs > ((cg.calib_throttle_mid + 40) << ADC_OVS_SHIFT))
	menu_adc_direction = 1;

    // and then CH3 button (only when throttle is centred)
    else if (btn(BTN_CH3))  menu_adc_direction ^= 1;

    // if this channel is using forced values, set it to left/right
    if (menu_force_value_channel)
	menu_force_value = menu_adc_direction ? PPM(500) : PPM(-500);
}
Exemple #2
0
// Set extra-trigger function n's value from a linear channel position.
// n:       index into et_functions[] (0 = OFF; out-of-range rejected)
// lin_val: linear input in PPM(-500)..PPM(500) (full left .. full right)
// The input range is mapped linearly onto etf->min..etf->max and stored
// through the AVAL() macro.
void menu_et_function_set_from_linear(u8 n, s16 lin_val) {
    // NOTE(review): etf is computed before the bounds check below; only the
    // address is formed, but checking n first would be tidier.
    et_functions_s *etf = &et_functions[n];
    s16 val;
    if (n == 0 || n >= ET_FUNCTIONS_SIZE)  return;  // OFF or bad
    // map lin_val between min and max
    // (shift input to 0..PPM(1000), scale by the number of output steps,
    //  then offset by min; 32-bit intermediate avoids s16 overflow)
    val = (s16)((s32)(lin_val + PPM(500)) * (etf->max - etf->min + 1)
		/ PPM(1000)) + etf->min;
    if (val > etf->max)  val = etf->max;	// lin_val was full right
    AVAL(val);
}
Exemple #3
0
Fichier : ppm.c Projet : eknl/gt3b
// Calculate the length of the SYNC signal that pads the PPM frame to
// PPM_FRAME_LENGTH (subtracts the time already consumed by the channel
// pulses accumulated in ppm_microsecs01).
// Also sets the flag for ppm_interrupt to use the new values,
// and starts TIM3 at the first call.
void ppm_calc_sync(void) {
    // ARR must be set to the computed value - 1.  Subtracting PPM(500)
    // before the divide (instead of adding it for round-to-nearest) is
    // equivalent to "add PPM(500), then subtract 1 from the quotient",
    // i.e. rounding and the "-1" in a single step.
    *(u16 *)(&ppm_values[0]) =
	(u16)(((10 * (u32)PPM_FRAME_LENGTH - ppm_microsecs01) * PPM_MUL_SYNC
	       - PPM(500)) / PPM(1000));
    ppm_microsecs01 = 0;	// start accumulating the next frame
    // for the first set of ppm values, enable the timer
    if (ppm_enabled)  return;
    ppm_enabled = 1;
    BSET(TIM3_EGR, 0);    // generate update event
    BSET(TIM3_CR1, 0);    // enable timer
}
  // Compute the Alexander dual of the monomial ideal I with respect to the
  // power-product pp, by converting I into Frobby's representation and
  // calling Frobby::alexanderDual.  Throws if I is not monomial or if pp
  // lives in a different PP monoid than I's ring.
  ideal FrbAlexanderDual(const ideal& I, ConstRefPPMonoidElem pp)
  {
    MustHaveMonomialGens(I, "FrbAlexanderDual");
    const SparsePolyRing polyRing = RingOf(I);
    if (PPM(polyRing) != owner(pp))
      CoCoA_ERROR(ERR::MixedPPMs, "FrbAlexanderDual");

    const std::size_t count = NumIndets(polyRing);

    Frobby::Ideal frobbyIdeal(count);
    ToFrobbyIdeal(frobbyIdeal, I);

    // The consumer collects the ideals Frobby produces back into polyRing.
    FrobbyMonomialIdealConsumer consumer(polyRing);

    // Set up the point to dualize on. 
    // NOTE(review): the manual new[] + try/catch + delete[] below is
    // exception-safe, but std::vector<WrappedMpzT> (or
    // unique_ptr<WrappedMpzT[]>) would express the same with RAII.
    WrappedMpzT* exponentVector = new WrappedMpzT[count];
    try
    {
      // Copy each exponent of pp into the mpz array Frobby expects.
      for (size_t indep = 0; indep < count; ++indep)
        mpz_set(exponentVector[indep].getMpzT(), mpzref(BigExponent(pp, indep)));

      // Compute Alexander dual using Frobby.
      // (cast relies on WrappedMpzT being layout-compatible with mpz_t)
      Frobby::alexanderDual(frobbyIdeal, (mpz_t*)exponentVector, consumer);
    }
    catch (...)
    {
      delete[] exponentVector;
      throw;
    }

    delete[] exponentVector;

    // The dual is the last (and only expected) ideal the consumer collected.
    return consumer.getIdealsRef().back();
  }
Exemple #5
0
Fichier : ppm.c Projet : eknl/gt3b
// Set the output pulse length of one PPM channel.
// channel:    1-based channel number
// microsec01: pulse length in 0.1 microsecond units
// The raw length is accumulated into ppm_microsecs01 so that ppm_calc_sync()
// can later compute the SYNC gap padding the frame.
void ppm_set_value(u8 channel, u16 microsec01) {
    ppm_microsecs01 += microsec01;
    // ARR must be set to the computed value - 1; subtracting PPM(500)
    // instead of adding it performs round-to-nearest and the "-1" in one
    // step (see the matching comment in ppm_calc_sync).
    *(u16 *)(&ppm_values[(u8)(channel << 1)]) =
	(u16)(((u32)microsec01 * PPM_MUL_SERVO - PPM(500)) / PPM(1000));
    // for first channel, when we are still in HW SYNC generate, update
    //   new ARR values to get it applied now
    if (channel == 1) {
	sim();	// mask interrupts so the two-byte ARR update is atomic
		// (sim/rim = STM8 set/reset interrupt mask -- confirm for this target)
	if (ppm_channel2 == 4) {
	    // next will be channel2, so we have channel1 in ARR now
	    TIM3_ARRH = ppm_values[2];
	    TIM3_ARRL = ppm_values[3];
	}
	rim();	// re-enable interrupts
    }
}
Exemple #6
0
// Build the camera's perspective projection matrix (column-major, as glm
// expects).  The frustum is symmetric: it is derived from the vertical
// field of view, the aspect ratio, and the near/far clip distances, with
// depth mapped into [0, 1].
glm::mat4 Camera::PerspectiveProjectionMatrix()
{
    // Half-extents of the image plane at the near clip distance.
    const float top    = near_clip * tan(fovy * DEG2RAD / 2);
    const float bottom = -top;
    const float right  = top * aspect;
    const float left   = -right;

    // Assemble the four columns directly in the mat4 constructor.
    return glm::mat4(
        glm::vec4(2 * near_clip / (right - left), 0, 0, 0),
        glm::vec4(0, 2 * near_clip / (top - bottom), 0, 0),
        glm::vec4(-(right + left) / (right - left),
                  -(top + bottom) / (top - bottom),
                  far_clip / (far_clip - near_clip),
                  1),
        glm::vec4(0, 0, -(far_clip * near_clip) / (far_clip - near_clip), 0));
}
Exemple #7
0
  // Simple rather than efficient (esp. the call to eval)
  // Buchberger-Moeller over a generic coefficient ring: returns a Groebner
  // basis of the ideal of polynomials in P vanishing at the given points
  // (one point per row of pts).
  std::vector<RingElem> BM_generic(const SparsePolyRing& P, const ConstMatrixView& pts)
  {
    if (CoeffRing(P) != RingOf(pts)) CoCoA_ERROR(ERR::MixedRings, "Buchberger-Moeller");
    if (NumIndets(P) < NumCols(pts)) CoCoA_ERROR(ERR::IncompatDims, "Buchberger-Moeller");

    const long NumPts = NumRows(pts);
    const long dim = NumCols(pts);
    const ring k = CoeffRing(P);

    vector<RingElem> GB;
    const PPMonoid TT = PPM(P);
    // QBG incrementally proposes "corner" power-products; M holds the
    // evaluation vectors (at the points) of the quotient-basis elements.
    QBGenerator QBG(TT);
    QBG.myCornerPPIntoQB(one(TT));
    matrix M = NewDenseMat(k, 1, NumPts);
    // Fill first row with 1:
    for (int i=0; i<NumPts; ++i) SetEntry(M,0,i, 1);

    // The next loop removes the last indets from consideration.
    for (int i=dim; i < NumIndets(TT); ++i)
      QBG.myCornerPPIntoAvoidSet(indet(TT,i));

    // For each corner t: if eval(t) is a linear combination of the rows of
    // M, the combination yields a new GB element; otherwise t joins the
    // quotient basis and its evaluation row is appended to M.
    while (!QBG.myCorners().empty())
    {
      const PPMonoidElem t = QBG.myCorners().front();
      const vector<RingElem> v = eval(t, pts);
      ConstMatrixView NewRow = RowMat(v);
      const matrix a = LinSolve(transpose(M), transpose(NewRow));
      if (IsValidSolution(a))
      {
        QBG.myCornerPPIntoAvoidSet(t);
        // GB element: t minus the linear combination of QB monomials.
        RingElem NewGBElem = monomial(P, one(k), t);
        const vector<PPMonoidElem>& QB =  QBG.myQB();
        for (int i=0; i < NumRows(M); ++i)
          NewGBElem -= monomial(P, a(i,0), QB[i]);
        GB.push_back(NewGBElem);
      }
      else
      {
        QBG.myCornerPPIntoQB(t);
        M = NewDenseMat(ConcatVer(M, NewRow));
      }
    }
    return GB;
  }
Exemple #8
0
  // Buchberger-Moeller over a small prime field: computes a Groebner basis
  // of the vanishing ideal of the given points by delegating to the external
  // C routine BM_affine_mod_p.  Returns an empty vector on failure.
  std::vector<RingElem> BM_modp(const SparsePolyRing& P, const ConstMatrixView& pts)
  {
    ring Fp = CoeffRing(P);

    const int NumPts = NumRows(pts);
    const int NumVars = NumCols(pts);
    const long p = ConvertTo<long>(characteristic(Fp));
    FF FFp = FFctor(p);
    FFselect(FFp);
    // Convert the point coordinates into the C library's FFelem matrix
    // (entries reduced to their least non-negative residue mod p).
    FFelem** points_p = (FFelem**)malloc(NumPts*sizeof(FFelem*));
    for (int i=0; i < NumPts; ++i)
    {
      points_p[i] = (FFelem*)malloc(NumVars*sizeof(FFelem));
      for (int j=0; j < NumVars; ++j)
      {
        points_p[i][j] = ConvertTo<FFelem>(LeastNNegRemainder(ConvertTo<BigInt>(pts(i,j)), p));
      }
    }
    pp_cmp_PPM = &PPM(P); // not threadsafe (same caveat as in BM_QQ)
    const BM modp = BM_affine_mod_p(NumVars, NumPts, points_p, pp_cmp);
    pp_cmp_PPM = NULL;    // reset the global, as BM_QQ does
    // Release the point matrix: the solver works on its own copy (BM_QQ
    // frees its analogous matrix immediately after the call).  The original
    // code leaked points_p on every call, including the error path below.
    for (int i=0; i < NumPts; ++i) free(points_p[i]);
    free(points_p);

    if (modp == NULL) return std::vector<RingElem>(); // empty list means error

    // Translate the C result back into RingElems: each GB element is its
    // leading power-product plus one term per separator with a non-zero
    // coefficient.
    const int GBsize = modp->GBsize;
    std::vector<RingElem> GB(GBsize);
    vector<long> expv(NumVars);
    for (int i=0; i < GBsize; ++i)
    {
      for (int var = 0; var < NumVars; ++var)
        expv[var] = modp->pp[modp->GB[i]][var];
      RingElem GBelem = monomial(P, 1, expv);
      for (int j=0; j < NumPts; ++j)
      {
        const int c = modp->M[modp->GB[i]][j+NumPts];
        if (c == 0) continue;
        for (int var = 0; var < NumVars; ++var)
          expv[var] = modp->pp[modp->sep[j]][var];
        GBelem += monomial(P, c, expv);
      }
      GB[i] = GBelem;
    }
    BM_dtor(modp);
    return GB;
  }
Exemple #9
0
  // Buchberger-Moeller over QQ: computes a Groebner basis of the vanishing
  // ideal of the given (rational) points.  The points are cleared to integer
  // coordinates via per-indeterminate scale factors, handed to the external
  // C routine BM_affine, and the result is rescaled back.
  std::vector<RingElem> BM_QQ(const SparsePolyRing& P, const ConstMatrixView& pts_in)
  {
    const long NumPts = NumRows(pts_in);
    const long dim = NumCols(pts_in);
    matrix pts = NewDenseMat(RingQQ(), NumPts, dim);
    for (long i=0; i < NumPts; ++i)
      for (long j=0; j < dim; ++j)
      {
        BigRat q;
        // NOTE(review): "throw 999" is a bare magic value; a CoCoA_ERROR
        // (as used elsewhere in this file) would be more appropriate.
        if (!IsRational(q, pts_in(i,j))) throw 999;
        SetEntry(pts,i,j, q);
      }

    // Ensure input pts have integer coords by using
    // scale factors for each indet.
    // ScaleFactor[j] = lcm of the denominators appearing in column j.
    vector<BigInt> ScaleFactor(dim, BigInt(1));
    for (long j=0; j < dim; ++j)
      for (long i=0; i < NumPts; ++i)
        ScaleFactor[j] = lcm(ScaleFactor[j], ConvertTo<BigInt>(den(pts(i,j))));

    // Copy the scaled (now integral) coordinates into a raw mpz_t matrix
    // in the layout BM_affine expects.
    mpz_t **points = (mpz_t**)malloc(NumPts*sizeof(mpz_t*));
    for (long i=0; i < NumPts; ++i)
    {
      points[i] = (mpz_t*)malloc(dim*sizeof(mpz_t));
      for (long j=0; j < dim; ++j) mpz_init(points[i][j]);
      for (long j=0; j < dim; ++j)
      {
        mpz_set(points[i][j], mpzref(ConvertTo<BigInt>(ScaleFactor[j]*pts(i,j))));
      }
    }


    BMGB char0; // these will be "filled in" by BM_affine below
    BM modp;    //
            
    pp_cmp_PPM = &PPM(P); // not threadsafe!
    BM_affine(&char0, &modp, dim, NumPts, points, pp_cmp); // THIS CALL DOES THE REAL WORK!!!
    pp_cmp_PPM = NULL;
    // The solver has copied what it needs; release the mpz matrix.
    for (long i=NumPts-1; i >=0 ; --i)
    {
      for (long j=0; j < dim; ++j) mpz_clear(points[i][j]);
      free(points[i]);
    }
    free(points);

    if (modp == NULL) { if (char0 != NULL) BMGB_dtor(char0); CoCoA_ERROR("Something went wrong", "BM_QQ"); }

    // Now extract the answer...
    const int GBsize = char0->GBsize;
    std::vector<RingElem> GB(GBsize);
    const long NumVars = dim;
    vector<long> expv(NumVars); // buffer for creating monomials
    for (int i=0; i < GBsize; ++i)
    {
      BigInt denom(1); // scale factor needed to make GB elem monic.
      for (int var = 0; var < NumVars; ++var)
      {
        expv[var] = modp->pp[modp->GB[i]][var];
        denom *= power(ScaleFactor[var], expv[var]);
      }
      RingElem GBelem = monomial(P, 1, expv);

      // Add one (rescaled) term per separator with a non-zero coefficient.
      for (int j=0; j < NumPts; ++j)
      {
        if (mpq_sgn(char0->GB[i][j])==0) continue;
        BigRat c(char0->GB[i][j]);
        for (int var = 0; var < NumVars; ++var)
        {
          expv[var] = modp->pp[modp->sep[j]][var];
          c *= power(ScaleFactor[var], expv[var]);
        }
        GBelem += monomial(P, c/denom, expv);
      }
      GB[i] = GBelem;
    }
    BMGB_dtor(char0);
    BM_dtor(modp);
    return GB;
    // ignoring separators for the moment
  }
Exemple #10
0
 // Not efficient, but efficiency is not needed at the moment.
 // Convert a facet (given as a bitset of indeterminates) into the
 // corresponding squarefree monomial of theP, with coefficient 1.
 RingElem Facet2RingElem(const SparsePolyRing& theP,const DynamicBitset& b)
 {
   const PPMonoidElem facetPP = NewPP(PPM(theP), b);
   return monomial(theP, 1, facetPP);
 }//Facet2RingElem
Exemple #11
0
/*! \brief Main function. Execution starts here.
 *
 * CS2200 demo: brings up the board, generates a 12.288 MHz and then an
 * 11.2896 MHz clock output, and finally sweeps the output up by ten
 * 100-PPM steps and back down again, forever.
 *
 * \retval 42 Fatal error.
 */
int main(void)
{
  uint32_t iter=0;
  uint32_t cs2200_out_freq=11289600;
  // NOTE(review): "static" is unnecessary for locals of main() (it runs once).
  static bool b_sweep_up=true;
  static uint32_t freq_step=0;

  // USART options.
  static usart_serial_options_t USART_SERIAL_OPTIONS =
  {
    .baudrate     = USART_SERIAL_EXAMPLE_BAUDRATE,
    .charlength   = USART_SERIAL_CHAR_LENGTH,
    .paritytype   = USART_SERIAL_PARITY,
    .stopbits     = USART_SERIAL_STOP_BIT
  };

  // Initialize the TWI using the internal RCOSC
  // (re-initialized below with the PBA clock once sysclk is running).
  init_twi(AVR32_PM_RCOSC_FREQUENCY);

  // Initialize the CS2200 and produce a default frequency.
  cs2200_setup(11289600, FOSC0);

  sysclk_init();

  // Initialize the board.
  // The board-specific conf_board.h file contains the configuration of the board
  // initialization.
  board_init();

  // Initialize the TWI
  init_twi(sysclk_get_pba_hz());

  // Initialize Serial Interface using Stdio Library
  stdio_serial_init(USART_SERIAL_EXAMPLE,&USART_SERIAL_OPTIONS);

  // Initialize the HMatrix.
  init_hmatrix();

  print_dbg("\r\nCS2200 Example\r\n");

  // Generate a 12.288 MHz frequency out of the CS2200.
  print_dbg("Output 12.288 MHz\r\n");
  cs2200_freq_clk_out(_32_BITS_RATIO(12288000, FOSC0));
  cpu_delay_ms( 10000, sysclk_get_cpu_hz());

  // Generate a 11.2896 MHz frequency out of the CS2200.
  print_dbg("Output 11.2896 MHz\r\n");
  cs2200_freq_clk_out(_32_BITS_RATIO(cs2200_out_freq, FOSC0));
  cpu_delay_ms( 10000, sysclk_get_cpu_hz());

  print_dbg("Sweep from 11.2896 MHz steps of 100 PPM\r\n");
  // freq_step = 100 PPM of the current output frequency (PPM() macro).
  freq_step = PPM(cs2200_out_freq, 100);

  // Sweep up for 11 iterations (0..10), then back down to 0, repeatedly.
  while(1)
  {
    uint32_t ratio;

    if(b_sweep_up)
    {
      if( iter<=10 )
      {
        print_dbg("Add 100 PPM\r\n");
        iter++;
        cs2200_out_freq += freq_step;
        ratio = _32_BITS_RATIO(cs2200_out_freq, FOSC0);
        cs2200_freq_clk_adjust((uint16_t)ratio);
        cpu_delay_ms( 1000, sysclk_get_cpu_hz());
        while( twi_is_busy() );  // wait for the TWI write to complete
      }
      else
        b_sweep_up=false;
    }

    if(!b_sweep_up)
    {
      if( iter>0 )
      {
        print_dbg("Sub 100 PPM\r\n");
        iter--;
        cs2200_out_freq -= freq_step;
        ratio = _32_BITS_RATIO(cs2200_out_freq, FOSC0);
        cs2200_freq_clk_adjust((uint16_t)ratio);
        cpu_delay_ms( 1000, sysclk_get_cpu_hz());
        while( twi_is_busy() );  // wait for the TWI write to complete
      }
      else
        b_sweep_up=true;
    }
  }
}
	// Capture a single still frame from the V4L2 camera device using one
	// USERPTR buffer (REQBUFS/QUERYBUF/QBUF, STREAMON, DQBUF, STREAMOFF).
	// In JPEG mode the driver delivers a JPEG + thumbnail + YUV blob which is
	// re-wrapped with EXIF; in YUV mode the frame is rotated/converted in
	// software and optionally JPEG-encoded.  Registered data callbacks are
	// invoked for RAW and COMPRESSED images.  Returns NO_ERROR on success,
	// -1 on most failures.
	int CameraHal::CapturePicture()
	{
		int image_width, image_height, preview_width, preview_height;
		unsigned long base, offset;
		
		struct v4l2_buffer buffer; // for VIDIOC_QUERYBUF and VIDIOC_QBUF
		struct v4l2_format format;
		struct v4l2_buffer cfilledbuffer; // for VIDIOC_DQBUF
		struct v4l2_requestbuffers creqbuf; // for VIDIOC_REQBUFS and VIDIOC_STREAMON and VIDIOC_STREAMOFF

		sp<MemoryBase> 		mPictureBuffer;
		sp<MemoryBase> 		mFinalPictureBuffer;
		sp<MemoryHeapBase>  mJPEGPictureHeap;
		sp<MemoryBase>		mJPEGPictureMemBase;
#if OMAP_SCALE
		sp<MemoryHeapBase> 	TempHeapBase;
		sp<MemoryBase>	 	TempBase;
		sp<IMemoryHeap> 	TempHeap;
#endif

		ssize_t newoffset;
		size_t newsize;

		mCaptureFlag = true;
		int jpegSize;
		void* outBuffer;
		int err, i;
		int err_cnt = 0;

		int EXIF_Data_Size = 0;
		int ThumbNail_Data_Size = 0;
		// NOTE(review): this 64KB buffer is leaked on every early "return -1"
		// below -- only the paths reaching "delete []pExifBuf" free it.
		unsigned char* pExifBuf = new unsigned char[65536];	//64*1024

		int twoSecondReviewMode = getTwoSecondReviewMode();
		int orientation = getOrientation();

		LOG_FUNCTION_NAME

		if (CameraSetFrameRate())
		{
			LOGE("Error in setting Camera frame rate\n");
			return -1;
		}

		HAL_PRINT("\n\n\n PICTURE NUMBER =%d\n\n\n",++pictureNumber);

		mParameters.getPictureSize(&image_width, &image_height);
		mParameters.getPreviewSize(&preview_width, &preview_height);	
		HAL_PRINT("Picture Size: Width = %d \t Height = %d\n", image_width, image_height);
		HAL_PRINT("Preview Size: Width = %d \t Height = %d\n", preview_width, preview_height);

#if OPEN_CLOSE_WORKAROUND
		// Workaround: cycle the device node before capture.
		close(camera_device);
		camera_device = open(VIDEO_DEVICE, O_RDWR);
		if (camera_device < 0) 
		{
			LOGE ("!!!!!!!!!FATAL Error: Could not open the camera device: %s!!!!!!!!!\n",
					strerror(errno) );
		}
#endif

		// Size the capture buffer: 2 bytes/pixel in both modes (JPEG mode
		// sizes by the driver's JPEG capture resolution).
		if(mCamera_Mode == CAMERA_MODE_JPEG)
		{
			int jpeg_width = GetJPEG_Capture_Width();
			int jpeg_height = GetJPEG_Capture_Height();
			capture_len = jpeg_width * jpeg_height * 2;
		}
		else
		{
			capture_len = image_width * image_height * 2;
		}

		// Round the buffer length up to a 4KB page boundary.
		if (capture_len & 0xfff)
		{
			capture_len = (capture_len & 0xfffff000) + 0x1000;
		}

		HAL_PRINT("pictureFrameSize = 0x%x = %d\n", capture_len, capture_len);

		mPictureHeap = new MemoryHeapBase(capture_len);

		// Page-align the userptr base within the heap.
		base = (unsigned long)mPictureHeap->getBase();
		base = (base + 0xfff) & 0xfffff000;
		offset = base - (unsigned long)mPictureHeap->getBase();

		/* set size & format of the video image */
		format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		format.fmt.pix.width = image_width;
		format.fmt.pix.height = image_height;
		
		if(mCamera_Mode == CAMERA_MODE_JPEG)
			format.fmt.pix.pixelformat = PIXEL_FORMAT_JPEG;
		else
			format.fmt.pix.pixelformat = PIXEL_FORMAT;

		if (ioctl(camera_device, VIDIOC_S_FMT, &format) < 0)
		{
			LOGE ("Failed to set VIDIOC_S_FMT.\n");
			return -1;
		}
		
#if OMAP_SCALE
        // Front (VGA) camera, non-VT: mirror the sensor for 0/180 shots;
        // undone after the capture below.
        if(mCameraIndex == VGA_CAMERA && mCamMode != VT_MODE) {
            if(orientation == 0 || orientation == 180) {
                struct v4l2_control vc;            
                CLEAR(vc);
                vc.id = V4L2_CID_FLIP;                
                vc.value = CAMERA_FLIP_MIRROR;
                if (ioctl (camera_device, VIDIOC_S_CTRL, &vc) < 0) {
                    LOGE("V4L2_CID_FLIP fail!\n");
                    return UNKNOWN_ERROR;  
                }
            }
        }
#endif  

		/* Shutter CallBack */
		if(mMsgEnabled & CAMERA_MSG_SHUTTER)
		{
			mNotifyCb(CAMERA_MSG_SHUTTER,0,0,mCallbackCookie);
		} 

		/* Check if the camera driver can accept 1 buffer */
		creqbuf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		creqbuf.memory = V4L2_MEMORY_USERPTR;
		creqbuf.count  = 1;
		if (ioctl(camera_device, VIDIOC_REQBUFS, &creqbuf) < 0)
		{
			LOGE ("VIDIOC_REQBUFS Failed. errno = %d\n", errno);
			return -1;
		}

		buffer.type = creqbuf.type;
		buffer.memory = creqbuf.memory;
		buffer.index = 0;
		if (ioctl(camera_device, VIDIOC_QUERYBUF, &buffer) < 0) {
			LOGE("VIDIOC_QUERYBUF Failed");
			return -1;
		}

		buffer.m.userptr = base;
		mPictureBuffer = new MemoryBase(mPictureHeap, offset, buffer.length);
		LOGD("Picture Buffer: Base = %p Offset = 0x%x\n", (void *)base, (unsigned int)offset);

		if (ioctl(camera_device, VIDIOC_QBUF, &buffer) < 0) {
			LOGE("CAMERA VIDIOC_QBUF Failed");
			return -1;
		}

		/* turn on streaming */
		creqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		if (ioctl(camera_device, VIDIOC_STREAMON, &creqbuf.type) < 0)
		{
			LOGE("VIDIOC_STREAMON Failed\n");
			return -1;
		}

		HAL_PRINT("De-queue the next avaliable buffer\n");

		/* De-queue the next avaliable buffer */
		cfilledbuffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		cfilledbuffer.memory = creqbuf.memory;

		// Retry DQBUF up to ~10 times; on persistent failure notify the
		// client so it can restart the camera, and bail out "cleanly".
		while (ioctl(camera_device, VIDIOC_DQBUF, &cfilledbuffer) < 0) 
		{
			LOGE("VIDIOC_DQBUF Failed cnt = %d\n", err_cnt);
			if(err_cnt++ > 10)
			{
				mNotifyCb(CAMERA_MSG_ERROR,CAMERA_DEVICE_ERROR_FOR_RESTART,0,mCallbackCookie);

				mPictureBuffer.clear();
				mPictureHeap.clear();

				return NO_ERROR;           
			}
		}
// NOTE: in this file PPM(...) appears to be a timing/profiling macro
// (unrelated to the parts-per-million and power-product PPM elsewhere).
#if TIMECHECK
		PPM("AFTER CAPTURE YUV IMAGE\n");
#endif
		/* turn off streaming */
		creqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		if (ioctl(camera_device, VIDIOC_STREAMOFF, &creqbuf.type) < 0) 
		{
			LOGE("VIDIOC_STREAMON Failed\n");
			return -1;
		}

#if OMAP_SCALE
        // Undo the mirror flip applied before the capture.
        if(mCameraIndex == VGA_CAMERA && mCamMode != VT_MODE) {
            if(orientation == 0 || orientation == 180) {
                struct v4l2_control vc;            
                CLEAR(vc);
                vc.id = V4L2_CID_FLIP;                
                vc.value = CAMERA_FLIP_NONE;
                if (ioctl (camera_device, VIDIOC_S_CTRL, &vc) < 0) {
                    LOGE("V4L2_CID_FLIP fail!\n");
                    return UNKNOWN_ERROR;  
                }
            }
        }
#endif  

		// JPEG mode: the driver's blob contains JPEG data, a thumbnail and
		// (optionally) a YUV preview at fixed offsets; rebuild a final JPEG
		// with EXIF and deliver the callbacks.
		if(mCamera_Mode == CAMERA_MODE_JPEG)
		{
			int JPEG_Image_Size = GetJpegImageSize();
			int thumbNailOffset = GetThumbNailOffset();
			int yuvOffset = GetYUVOffset();
			ThumbNail_Data_Size = GetThumbNailDataSize();
			sp<IMemoryHeap> heap = mPictureBuffer->getMemory(&newoffset, &newsize);
			uint8_t* pInJPEGDataBUuf = (uint8_t *)heap->base() + newoffset ;
			uint8_t* pInThumbNailDataBuf = (uint8_t *)heap->base() + thumbNailOffset;
			uint8_t* pYUVDataBuf = (uint8_t *)heap->base() + yuvOffset;

			CreateExif(pInThumbNailDataBuf,ThumbNail_Data_Size,pExifBuf,EXIF_Data_Size,1);

			//create a new binder object 
			mFinalPictureHeap = new MemoryHeapBase(EXIF_Data_Size+JPEG_Image_Size);
			mFinalPictureBuffer = new MemoryBase(mFinalPictureHeap,0,EXIF_Data_Size+JPEG_Image_Size);
			heap = mFinalPictureBuffer->getMemory(&newoffset, &newsize);
			uint8_t* pOutFinalJpegDataBuf = (uint8_t *)heap->base();

			//create a new binder obj to send yuv data
			if(yuvOffset)
			{
				int mFrameSizeConvert = (preview_width*preview_height*3/2) ;

				mYUVPictureHeap = new MemoryHeapBase(mFrameSizeConvert);
				mYUVPictureBuffer = new MemoryBase(mYUVPictureHeap,0,mFrameSizeConvert);
				sp<IMemoryHeap> newheap = mYUVPictureBuffer->getMemory(&newoffset, &newsize);
#if TIMECHECK
				PPM("YUV COLOR CONVERSION STARTED\n");
#endif
				Neon_Convert_yuv422_to_NV21((uint8_t *)pYUVDataBuf, (uint8_t *)newheap->base(), mPreviewWidth, mPreviewHeight);
#if TIMECHECK
				PPM("YUV COLOR CONVERSION ENDED\n");
#endif
			}
			//create final JPEG with EXIF into that
			int OutJpegSize = 0;
			// NOTE(review): this early return leaks pExifBuf and skips the
			// buffer cleanup at the end of the function.
			err = CreateJpegWithExif(pInJPEGDataBUuf,JPEG_Image_Size,pExifBuf,EXIF_Data_Size,pOutFinalJpegDataBuf,OutJpegSize);
			if(err==false) return -1;
			
			if(yuvOffset)
			{
#ifdef HARDWARE_OMX
				if(twoSecondReviewMode == 1)
				{
					DrawOverlay(pYUVDataBuf, true);
				}
#endif //HARDWARE_OMX
				if(mMsgEnabled & CAMERA_MSG_RAW_IMAGE)
				{
					mDataCb(CAMERA_MSG_RAW_IMAGE, mYUVPictureBuffer, mCallbackCookie);
				}	
			}

			if (mMsgEnabled & CAMERA_MSG_COMPRESSED_IMAGE)
			{
				mDataCb(CAMERA_MSG_COMPRESSED_IMAGE, mFinalPictureBuffer, mCallbackCookie);
			}

#if OPEN_CLOSE_WORKAROUND
			close(camera_device);
			camera_device = open(VIDEO_DEVICE, O_RDWR);
			if (camera_device < 0) 
			{
				LOGE ("!!!!!!!!!FATAL Error: Could not open the camera device: %s!!!!!!!!!\n", strerror(errno) );
			}
#endif
		}

		// YUV mode: rotate the raw frame (NEON), JPEG-encode in software if
		// requested, and deliver the callbacks.
		if(mCamera_Mode == CAMERA_MODE_YUV)
		{
#ifdef HARDWARE_OMX
			int mFrameSizeConvert = (image_width*image_height*2) ;
			mVGAYUVPictureHeap = new MemoryHeapBase(mFrameSizeConvert);
			mVGAYUVPictureBuffer = new MemoryBase(mVGAYUVPictureHeap,0,mFrameSizeConvert);
			mVGANewheap = mVGAYUVPictureBuffer->getMemory(&newoffset, &newsize);

			sp<IMemoryHeap> heap = mPictureBuffer->getMemory(&newoffset, &newsize);
			uint8_t* pYUVDataBuf = (uint8_t *)heap->base() + newoffset ;
			LOGD("PictureThread: generated a picture, yuv_buffer=%p yuv_len=%d\n",pYUVDataBuf,capture_len);
#if OMAP_SCALE
			TempHeapBase = new MemoryHeapBase(mFrameSizeConvert);
			TempBase = new MemoryBase(TempHeapBase,0,mFrameSizeConvert);
			TempHeap = TempBase->getMemory(&newoffset, &newsize);
			if(scale_process((void*)pYUVDataBuf, mPreviewWidth, mPreviewHeight,(void*)TempHeap->base(), mPreviewHeight, mPreviewWidth, 0, PIX_YUV422I, 1))
			{
				LOGE("scale_process() failed\n");
			}
#endif

#if TIMECHECK
			PPM("YUV COLOR ROTATION STARTED\n");
#endif                   

#if 0	//YUV dump code for testing
			FILE* fIn = NULL;	
			fIn = fopen("/data/output.yuv", "w");
			if ( fIn == NULL ) 	  
			{ 		 
				LOGE("Error: failed to open the file for writing\n");		 
			}		
			fwrite((uint8_t*)mVGANewheap->base(), 1, mPreviewWidth*mPreviewHeight*2, fIn);	   
			fclose(fIn);
#endif

			/*
			   	for VGA capture case
				pYUVDataBuf : Input buffer from Camera Driver YUV422.
				mVGANewheap->base() : 90 or 270 degree rotated YUV422 format.
			 */
			{
				int error = 0;
#if OMAP_SCALE
				neon_args->pIn 		= (uint8_t*)TempHeap->base();
#else
				neon_args->pIn 		= (uint8_t*)pYUVDataBuf;
#endif
				neon_args->pOut 	= (uint8_t*)mVGANewheap->base();
				// width/height are swapped on purpose: the output is rotated.
				neon_args->height 	= mPreviewWidth; 
				neon_args->width 	= mPreviewHeight;
#if OMAP_SCALE
				neon_args->rotate 	= NEON_ROT90;
#else
				neon_args->rotate 	= NEON_ROT270;
#endif
				if (Neon_Rotate != NULL)
					error = (*Neon_Rotate)(neon_args);
				else
					LOGE("Rotate Fucntion pointer Null");

				if (error < 0) {
					LOGE("Error in Rotation 90");

				}							    					
			}
#if OMAP_SCALE
			TempHeapBase.clear();
			TempBase.clear();
			TempHeap.clear();
#endif
#if TIMECHECK
			PPM("YUV COLOR ROTATION Done\n");
#endif
#endif //HARDWARE_OMX

			if (mMsgEnabled & CAMERA_MSG_COMPRESSED_IMAGE)
			{
#ifdef HARDWARE_OMX  
				int jpegSize = image_width * image_height*2;
				capture_len = (image_width*image_height*2) ;
				CreateExif(NULL, 0, pExifBuf, EXIF_Data_Size, 0);
				HAL_PRINT("VGA EXIF size : %d\n", EXIF_Data_Size);
				// 256 bytes of slack; encoder output starts 128 bytes in.
				mJPEGPictureHeap = new MemoryHeapBase(jpegSize + 256);
				outBuffer = (void *)((unsigned long)(mJPEGPictureHeap->getBase()) + 128);
#if TIMECHECK
				PPM("BEFORE JPEG Encode Image\n");
#endif
				HAL_PRINT("VGA capture : outbuffer = 0x%x, jpegSize = %d, yuv_buffer = 0x%x, yuv_len = %d, image_width = %d, image_height = %d, quality = %d, mippMode =%d\n", 
							outBuffer, jpegSize, yuv_buffer, capture_len, image_width, image_height, mYcbcrQuality, mippMode); 

				if(isStart_JPEG)
				{
					int jpegFormat = PIX_YUV422I;
#ifdef OMAP_ENHANCEMENT
#if OMAP_SCALE

					err = jpegEncoder->encodeImage(outBuffer, 
											jpegSize, 
											(uint8_t*)mVGANewheap->base(), 
											capture_len, 
											pExifBuf,
											EXIF_Data_Size,
											image_width,	//
											image_height,	//
											mThumbnailWidth,
											mThumbnailHeight,
											mYcbcrQuality,
											jpegFormat);
#else
					// width/height swapped vs. the OMAP_SCALE branch: the
					// rotated frame has transposed dimensions.
					err = jpegEncoder->encodeImage(outBuffer, 
											jpegSize, 
											(uint8_t*)mVGANewheap->base(), 
											capture_len, 
											pExifBuf,
											EXIF_Data_Size,
											image_height,	//
											image_width,	//
											mThumbnailWidth,
											mThumbnailHeight,
											mYcbcrQuality,
											jpegFormat);
#endif        	
					LOGD("JPEG ENCODE END\n");

					if(err != true) {
						LOGE("Jpeg encode failed!!\n");
						return -1;
					} else {
						LOGD("Jpeg encode success!!\n");
					}
#endif
				}
#if TIMECHECK
				PPM("AFTER JPEG Encode Image\n");
#endif
#ifdef OMAP_ENHANCEMENT
				mJPEGPictureMemBase = new MemoryBase(mJPEGPictureHeap, 128, jpegEncoder->jpegSize);
#endif
				if (mMsgEnabled & CAMERA_MSG_COMPRESSED_IMAGE)
				{
					mDataCb(CAMERA_MSG_COMPRESSED_IMAGE,mJPEGPictureMemBase,mCallbackCookie);
				}

				mJPEGPictureMemBase.clear();
				mJPEGPictureHeap.clear();
#endif //HARDWARE_OMX
			}//END of CAMERA_MSG_COMPRESSED_IMAGE

			yuv_buffer = (uint8_t*)cfilledbuffer.m.userptr;		           

			if(twoSecondReviewMode == 1)
			{ 	
				DrawOverlay(yuv_buffer, true);
			}  
			//yuv_buffer: [Reused]Output buffer with YUV 420P 270 degree rotated.             
			if(mMsgEnabled & CAMERA_MSG_RAW_IMAGE)
			{
				Neon_Convert_yuv422_to_YUV420P((uint8_t *)mVGANewheap->base(), (uint8_t *)yuv_buffer, mPreviewHeight, mPreviewWidth);         	
				mDataCb(CAMERA_MSG_RAW_IMAGE, mPictureBuffer, mCallbackCookie);
			}        
		}
		//END of CAMERA_MODE_YUV

		// Release the per-capture buffers (sp<> clear drops the references).
		mPictureBuffer.clear();
		mPictureHeap.clear();
		if(mCamera_Mode == CAMERA_MODE_JPEG)
		{
			mFinalPictureBuffer.clear();
			mFinalPictureHeap.clear();
			mYUVPictureBuffer.clear();
			mYUVPictureHeap.clear();
		}

		if(mCamera_Mode == CAMERA_MODE_YUV)
		{
			if(mCameraIndex == VGA_CAMERA)
			{
				mVGAYUVPictureBuffer.clear();
				mVGAYUVPictureHeap.clear();
			}
		}

		delete []pExifBuf;
		mCaptureFlag = false;
		LOG_FUNCTION_NAME_EXIT

		return NO_ERROR;

	}
//!
//! @brief This function takes the stream coming from the selected USB pipe and sends
//! it to the DAC driver. Moreover, it ensures that both input and output streams
//! keep synchronized by adding or deleting samples.
//!
//! @param side          USB_STREAM_HOST for USB host, USB_STREAM_DEVICE for device.
//! @param pipe_in       Number of the addressed pipe/endpoint
//! @param pFifoCount    (return parameter) NULL or pointer to the number of used buffers at this time
//!
//! @return              status: (USB_STREAM_STATUS_OK, USB_STREAM_STATUS_NOT_SYNCHRONIZED,
//!                      USB_STREAM_STATUS_SPEED_UP, USB_STREAM_STATUS_SLOW_DOWN, USB_STREAM_STATUS_BUFFER_OVERFLOW)
//!
int usb_stream_input(usb_stream_side_t side, uint8_t pipe_in, uint32_t* pFifoCount)
{
   uint16_t      fifo_used_cnt;
   uint16_t      byte_count=0;
   uint32_t      i;
   UnionPtr pswap;
   UnionPtr buffer;

   // We come here because we have received something; bump the internal
   // activity counter.
   usb_stream_cnt++;

   fifo_used_cnt=usb_stream_fifo_get_used_room();
   if (pFifoCount)
      *pFifoCount = fifo_used_cnt;

   // usb_stream_fifo_get_free_room()
   if( USB_STREAM_BUFFER_NUMBER-fifo_used_cnt==0 )
   {  // Fatal error: even with the synchro mechanism acting, we are in a case in which the
      // buffers are full.
      usb_stream_context->synchronized = false;
      usb_stream_context->status = USB_STREAM_ERROR_NOT_SYNCHRONIZED;
      return usb_stream_context->status;
   }

   // pswap and buffer alias the same FIFO slot: buffer is used for the raw
   // read, pswap for the in-place byte swapping below.
   pswap.s8ptr  =
   buffer.s8ptr = usb_stream_fifo_get_buffer(usb_stream_context->wr_id);

#if USB_HOST_FEATURE == true
   if( side==USB_STREAM_HOST )
   {
      byte_count=Host_byte_count(pipe_in);
   }
#endif
#if USB_DEVICE_FEATURE == true
   if( side==USB_STREAM_DEVICE )
   {
      byte_count=Usb_byte_count(pipe_in);
   }
#endif
  // No payload: report either a broken stream (no data for a whole timeout
  // period) or a transient no-data condition.
  if( byte_count==0 )
  {
     if( cpu_is_timeout(&broken_stream_timer) ) {
        usb_stream_context->status = USB_STREAM_ERROR_BROKEN_STREAM;
     } else {
        usb_stream_context->status = USB_STREAM_ERROR_NO_DATA;
     }
     return usb_stream_context->status;
  }
  else
  {
    // reset time out detection
    cpu_set_timeout(cpu_ms_2_cy(BROKEN_STREAM_TIMER, FCPU_HZ), &broken_stream_timer);
  }

#if USB_HOST_FEATURE == true
   if( side==USB_STREAM_HOST )
   {
      Host_reset_pipe_fifo_access(pipe_in);
      host_read_p_rxpacket(pipe_in, (void*)buffer.s8ptr, byte_count, NULL);
	 }
#endif
#if USB_DEVICE_FEATURE == true
   if( side==USB_STREAM_DEVICE )
	 {
      Usb_reset_endpoint_fifo_access(pipe_in);
      usb_read_ep_rxpacket(pipe_in, (void*)buffer.s8ptr, byte_count, NULL);
	}
#endif

   usb_stream_context->status = USB_STREAM_ERROR_NONE;

   // Clamp oversized packets to the FIFO slot size and flag the overflow.
   if( byte_count > USB_STREAM_REAL_BUFFER_SIZE )
   {
      byte_count = USB_STREAM_REAL_BUFFER_SIZE;
      usb_stream_context->status = USB_STREAM_ERROR_OVERFLOW;
   }

   // Swap samples since they are coming from the USB world.
   if( usb_stream_context->bits_per_sample==16 )
      for( i=0 ; i<byte_count/(16/8) ; i++ )
         pswap.s16ptr[i] = swap16(pswap.s16ptr[i]);

   else if( usb_stream_context->bits_per_sample==32 )
      for( i=0 ; i<byte_count/(32/8) ; i++ )
         pswap.s32ptr[i] = swap32(pswap.s32ptr[i]);

   //for( i=0 ; i<byte_count/2 ; i++ )
   //   printf("0x%04hx ", pswap[i]);
   //printf("\r\n");

   usb_stream_fifo_push(byte_count);
   fifo_used_cnt++;

   if( !usb_stream_context->synchronized )
   {
      usb_stream_context->status = USB_STREAM_ERROR_NOT_SYNCHRONIZED;

      if( fifo_used_cnt>=(USB_STREAM_BUFFER_NUMBER/2) )
      {  // We have enough buffers to start the playback.
         void* buffer;
         uint16_t   size;

         // CS2200: program the audio clock and set up the resync state
         // (step size = USB_STREAM_RESYNC_PPM_STEPS PPM of the base rate).
         cs2200_freq_clk_out(_32_BITS_RATIO(usb_stream_resync_frequency, CS2200_FREF));
         usb_stream_resync_step = PPM(usb_stream_resync_frequency, USB_STREAM_RESYNC_PPM_STEPS);
         usb_stream_resync_freq_ofst = usb_stream_resync_frequency;
         usb_stream_resync_ppm_ofst = 0;
         usb_stream_resync_last_room = fifo_used_cnt;
#define TIMER_USB_RESYNC_CORRECTION  320
         cpu_set_timeout( cpu_ms_2_cy(TIMER_USB_RESYNC_CORRECTION, FCPU_HZ), &usb_resync_timer );

         usb_stream_context->synchronized=true;
         // Prime the DAC with the first buffer...
         usb_stream_fifo_get(&buffer, &size);
         audio_mixer_dacs_output_direct(buffer, size/(usb_stream_context->channel_count*usb_stream_context->bits_per_sample/8));

         // Fill also the reload stage of the PDCA.
         usb_stream_fifo_pull();
         usb_stream_fifo_get(&buffer, &size);
         audio_mixer_dacs_output_direct(buffer, size/(usb_stream_context->channel_count*usb_stream_context->bits_per_sample/8));
      }
   }

   return usb_stream_context->status;
}
	int CameraHal::CapturePicture()
	{
		int image_width, image_height, preview_width, preview_height;
        int capture_len;
		unsigned long base, offset;
      
#ifdef R3D4_CONVERT     
        CColorConvert* pConvert;    //class for image processing
#endif		
		struct v4l2_buffer buffer; // for VIDIOC_QUERYBUF and VIDIOC_QBUF
		struct v4l2_format format;
		//struct v4l2_buffer cfilledbuffer; // for VIDIOC_DQBUF
		struct v4l2_requestbuffers creqbuf; // for VIDIOC_REQBUFS and VIDIOC_STREAMON and VIDIOC_STREAMOFF

		sp<MemoryBase> 		mPictureBuffer;
		sp<MemoryBase> 		mFinalPictureBuffer;
		sp<MemoryHeapBase>  mJPEGPictureHeap;
		sp<MemoryBase>		mJPEGPictureMemBase;


		ssize_t newoffset;
		size_t newsize;

		mCaptureFlag = true;
		int jpegSize;
		void* outBuffer;
		int err, i;
		int err_cnt = 0;


		int exifDataSize = 0;
		int thumbnaiDataSize = 0;
		unsigned char* pExifBuf = new unsigned char[64*1024];

		int twoSecondReviewMode = getTwoSecondReviewMode();
		int orientation = getOrientation();

		LOG_FUNCTION_NAME
		
		                           
		if (CameraSetFrameRate())
		{
			LOGE("Error in setting Camera frame rate\n");
			return -1;
		}
        
		LOGD("\n\n\n PICTURE NUMBER =%d\n\n\n",++pictureNumber);
       
        mParameters.getPictureSize(&image_width, &image_height);
		mParameters.getPreviewSize(&preview_width, &preview_height);	
		LOGV("mCameraIndex = %d\n", mCameraIndex);
		LOGD("Picture Size: Width = %d \t Height = %d\n", image_width, image_height);

        /* set size & format of the video image */
		format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		format.fmt.pix.width = image_width;
		format.fmt.pix.height = image_height;
        
		if(mCamera_Mode == CAMERA_MODE_JPEG)
		{
            format.fmt.pix.pixelformat = PIXEL_FORMAT_JPEG;
			capture_len =  GetJPEG_Capture_Width() * GetJPEG_Capture_Height() * JPG_BYTES_PER_PIXEL;
		}
		else
		{
            format.fmt.pix.pixelformat = PIXEL_FORMAT;
			capture_len = image_width * image_height * UYV_BYTES_PER_PIXEL;   
		}

         // round up to 4096 bytes
		if (capture_len & 0xfff)   
			capture_len = (capture_len & 0xfffff000) + 0x1000;

		LOGV("capture: %s mode, pictureFrameSize = 0x%x = %d\n", 
            (mCamera_Mode == CAMERA_MODE_JPEG)?"jpeg":"yuv", capture_len, capture_len);

            
		mPictureHeap = new MemoryHeapBase(capture_len);
		base = (unsigned long)mPictureHeap->getBase();
		base = (base + 0xfff) & 0xfffff000;
		offset = base - (unsigned long)mPictureHeap->getBase();


        // set capture format
		if (ioctl(camera_device, VIDIOC_S_FMT, &format) < 0)
		{
			LOGE ("Failed to set VIDIOC_S_FMT.\n");
			return -1;
		}
#if OMAP_SCALE       
        if(mCameraIndex == VGA_CAMERA && mCamMode != VT_MODE)
            if(orientation == 0 || orientation == 180)
                setFlip(CAMERA_FLIP_MIRROR);
#endif
		/* Shutter CallBack */
		if(mMsgEnabled & CAMERA_MSG_SHUTTER)
		{
			mNotifyCb(CAMERA_MSG_SHUTTER, 0, 0, mCallbackCookie);
		} 
		
		/* Check if the camera driver can accept 1 buffer */
		creqbuf.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		creqbuf.memory = V4L2_MEMORY_USERPTR;
		creqbuf.count  = 1;
		if (ioctl(camera_device, VIDIOC_REQBUFS, &creqbuf) < 0)
		{
			LOGE ("VIDIOC_REQBUFS Failed. errno = %d\n", errno);
			return -1;
		}

		buffer.type = creqbuf.type;
		buffer.memory = creqbuf.memory;
		buffer.index = 0;
		if (ioctl(camera_device, VIDIOC_QUERYBUF, &buffer) < 0) {
			LOGE("VIDIOC_QUERYBUF Failed");
			return -1;
		}

		buffer.m.userptr = base;
		mPictureBuffer = new MemoryBase(mPictureHeap, offset, buffer.length);
		LOGD("Picture Buffer: Base = %p Offset = 0x%x\n", (void *)base, (unsigned int)offset);

		if (ioctl(camera_device, VIDIOC_QBUF, &buffer) < 0) {
			LOGE("CAMERA VIDIOC_QBUF Failed");
			return -1;
		}

		/* turn on streaming */
		if (ioctl(camera_device, VIDIOC_STREAMON, &creqbuf.type) < 0)
		{
			LOGE("VIDIOC_STREAMON Failed\n");
			return -1;
		}

		LOGD("De-queue the next avaliable buffer\n");

		/* De-queue the next avaliable buffer */       
        //try to get buffer from camearo for 10 times
		while (ioctl(camera_device, VIDIOC_DQBUF, &buffer) < 0) 
		{
			LOGE("VIDIOC_DQBUF Failed cnt = %d\n", err_cnt);
			if(err_cnt++ > 10)
			{
				mNotifyCb(CAMERA_MSG_ERROR, CAMERA_DEVICE_ERROR_FOR_RESTART, 0, mCallbackCookie);

				mPictureBuffer.clear();
				mPictureHeap.clear();

				return NO_ERROR;           
			}
		}
		PPM("AFTER CAPTURE YUV IMAGE\n");
		/* turn off streaming */
        
		if (ioctl(camera_device, VIDIOC_STREAMOFF, &creqbuf.type) < 0) 
		{
			LOGE("VIDIOC_STREAMON Failed\n");
			return -1;
		}
#if OMAP_SCALE          
        if(mCameraIndex == VGA_CAMERA && mCamMode != VT_MODE)
            if(orientation == 0 || orientation == 180)
                setFlip(CAMERA_FLIP_NONE);
#endif                
        // camera returns processed jpeg image
		if(mCamera_Mode == CAMERA_MODE_JPEG)
		{
			int JPEG_Image_Size = GetJpegImageSize();
			int thumbNailOffset = 0;	//m4mo doesnt store offset ?
			int yuvOffset =0;			//m4mo doesnt store yuv image ?
			// int thumbNailOffset = GetThumbNailOffset();
			// int yuvOffset = GetYUVOffset();
			thumbnaiDataSize = GetThumbNailDataSize();
			sp<IMemoryHeap> heap = mPictureBuffer->getMemory(&newoffset, &newsize);
			uint8_t* pInJPEGDataBUuf = (uint8_t *)heap->base() + newoffset ;			//ptr to jpeg data
			uint8_t* pInThumbNailDataBuf = (uint8_t *)heap->base() + thumbNailOffset;	//ptr to thmubnail
			uint8_t* pYUVDataBuf = (uint8_t *)heap->base() + yuvOffset;

			// FILE* fOut = NULL;
			// fOut = fopen("/dump/dump.jpg", "w");
			// fwrite(pInJPEGDataBUuf, 1, JPEG_Image_Size, fOut);
			// fclose(fOut);
			
			CreateExif(pInThumbNailDataBuf, thumbnaiDataSize, pExifBuf, exifDataSize, EXIF_SET_JPEG_LENGTH);

			//create a new binder object 
			mFinalPictureHeap = new MemoryHeapBase(exifDataSize+JPEG_Image_Size);
			mFinalPictureBuffer = new MemoryBase(mFinalPictureHeap,0,exifDataSize+JPEG_Image_Size);
			heap = mFinalPictureBuffer->getMemory(&newoffset, &newsize);
			uint8_t* pOutFinalJpegDataBuf = (uint8_t *)heap->base();

			
			//create a new binder obj to send yuv data
			if(yuvOffset)
			{
				int mFrameSizeConvert = (preview_width*preview_height*3/2) ;

				mYUVPictureHeap = new MemoryHeapBase(mFrameSizeConvert);
				mYUVPictureBuffer = new MemoryBase(mYUVPictureHeap,0,mFrameSizeConvert);
				mYUVNewheap = mYUVPictureBuffer->getMemory(&newoffset, &newsize);

				PPM("YUV COLOR CONVERSION STARTED\n");
#ifdef NEON

				Neon_Convert_yuv422_to_NV21((uint8_t *)pYUVDataBuf, 
                    (uint8_t *)mYUVNewheap->base(), mPreviewWidth, mPreviewHeight);

				PPM("YUV COLOR CONVERSION ENDED\n");

				if(mMsgEnabled & CAMERA_MSG_RAW_IMAGE)
				{
					mDataCb(CAMERA_MSG_RAW_IMAGE, mYUVPictureBuffer, mCallbackCookie);
				}	
#else
                if(mMsgEnabled & CAMERA_MSG_RAW_IMAGE)
                    mDataCb(CAMERA_MSG_RAW_IMAGE, pYUVDataBuf, mCallbackCookie);

#endif
			}
			//create final JPEG with EXIF into that
			int OutJpegSize = 0;
			if(!CreateJpegWithExif( pInJPEGDataBUuf, JPEG_Image_Size, pExifBuf, exifDataSize, pOutFinalJpegDataBuf, OutJpegSize))
            {
                LOGE("createJpegWithExif fail!!\n");
                return -1;
            }

            if (mMsgEnabled & CAMERA_MSG_COMPRESSED_IMAGE)
            {
                mDataCb(CAMERA_MSG_COMPRESSED_IMAGE, mFinalPictureBuffer, mCallbackCookie);
             }

		}   //CAMERA_MODE_JPEG
        
        // camera returns 16 bit uyv image
        // -> needs to process (rotate/flip) 
        // -> and compess to jpeg (with dsp)
		if(mCamera_Mode == CAMERA_MODE_YUV)
		{
#ifdef HARDWARE_OMX
            // create new buffer for image processing
			int mFrameSizeConvert = (image_width*image_height*2) ;
			mYUVPictureHeap = new MemoryHeapBase(mFrameSizeConvert);
			mYUVPictureBuffer = new MemoryBase(mYUVPictureHeap,0,mFrameSizeConvert);
			mYUVNewheap = mYUVPictureBuffer->getMemory(&newoffset, &newsize);
            
            // buffer from v4l holding the actual image
            uint8_t *pYuvBuffer = (uint8_t*)buffer.m.userptr;    
            
			LOGD("PictureThread: generated a picture, pYuvBuffer=%p yuv_len=%d\n", 
                pYuvBuffer, capture_len);
                     
              
			PPM("YUV COLOR ROTATION STARTED\n");
    
#ifdef R3D4_CONVERT     
            if(mCameraIndex == VGA_CAMERA)
            {
				LOGV("use rotation");
                 // color converter and image processing (flip/rotate)
                 // neon lib doesnt seem to work, jpeg was corrupted?
                 // so use own stuff
                pConvert = new CColorConvert(pYuvBuffer, image_width, image_height, UYV2);
                
                //pConvert->writeFile(DUMP_PATH "before_rotate.uyv", SOURCE);  
                //pConvert->writeFile(DUMP_PATH "before_rotate.bmp", BMP);      
               
                if(mCameraIndex == VGA_CAMERA )
                    pConvert->rotateImage(ROTATE_270);
                // else
                   // pConvert->flipImage(FLIP_VERTICAL);
                
                // write rotatet image back to input buffer
                //pConvert->writeFile(DUMP_PATH "after_rotate.bmp", BMP);   
                pConvert->makeUYV2(NULL, INPLACE);  //INPLACE: no new buffer, write to input buffer   
                image_width = pConvert->getWidth();
                image_height = pConvert->geHeight();
            }
#else

#endif            
			PPM("YUV COLOR ROTATION Done\n");           
         
             //pYuvBuffer: [Reused]Output buffer with YUV 420P 270 degree rotated.             
			if(mMsgEnabled & CAMERA_MSG_RAW_IMAGE)
			{   
                // convert pYuvBuffer(YUV422I) to mYUVPictureBuffer(YUV420P)
				Neon_Convert_yuv422_to_YUV420P(pYuvBuffer, (uint8_t *)mYUVNewheap->base(), image_width, image_height);         	
				mDataCb(CAMERA_MSG_RAW_IMAGE, mYUVPictureBuffer, mCallbackCookie);
			}

#endif //HARDWARE_OMX

			if (mMsgEnabled & CAMERA_MSG_COMPRESSED_IMAGE)
			{
#ifdef HARDWARE_OMX  
                // int inputFormat = PIX_YUV420P;
                // int imputSize = image_width * image_height * PIX_YUV420P_BYTES_PER_PIXEL; 

                int inputFormat = PIX_YUV422I;
                int inputSize = image_width * image_height * PIX_YUV422I_BYTES_PER_PIXEL;
				int jpegSize = image_width * image_height * JPG_BYTES_PER_PIXEL;
                
				CreateExif(NULL, 0, pExifBuf, exifDataSize, EXIF_NOTSET_JPEG_LENGTH);
				HAL_PRINT("VGA EXIF size : %d\n", exifDataSize);
                
				mJPEGPictureHeap = new MemoryHeapBase(jpegSize + 256);
				outBuffer = (void *)((unsigned long)(mJPEGPictureHeap->getBase()) + 128);


      
				HAL_PRINT("YUV capture : outbuffer = 0x%x, jpegSize = %d, pYuvBuffer = 0x%x, yuv_len = %d, image_width = %d, image_height = %d, quality = %d, mippMode =%d\n", 
							outBuffer, jpegSize, pYuvBuffer, capture_len, image_width, image_height, mYcbcrQuality, mippMode); 

				if(jpegEncoder)
				{
                	PPM("BEFORE JPEG Encode Image\n");
					err = jpegEncoder->encodeImage(
                            outBuffer,                          // void* outputBuffer, 
                            jpegSize,                           // int outBuffSize, 
                            pYuvBuffer,                         // void *inputBuffer, 
                            inputSize,                          // int inBuffSize, 
                            pExifBuf,                           // unsigned char* pExifBuf,
                            exifDataSize,                       // int ExifSize,
                            image_width,	                    // int width, 
                            image_height,	                    // int height, 
                            mThumbnailWidth,                    // int ThumbWidth, 
                            mThumbnailHeight,                   // int ThumbHeight, 
                            mYcbcrQuality,                      // int quality,
                            inputFormat);                       // int isPixelFmt420p)
                    PPM("AFTER JPEG Encode Image\n");
					LOGD("JPEG ENCODE END\n");

					if(err != true) 
                    {
						LOGE("Jpeg encode failed!!\n");
						return -1;
					} 
                    else 
						LOGD("Jpeg encode success!!\n");
				}

				mJPEGPictureMemBase = new MemoryBase(mJPEGPictureHeap, 128, jpegEncoder->jpegSize);

				if (mMsgEnabled & CAMERA_MSG_COMPRESSED_IMAGE)
				{
					mDataCb(CAMERA_MSG_COMPRESSED_IMAGE, mJPEGPictureMemBase, mCallbackCookie);
				}

				mJPEGPictureMemBase.clear();
				mJPEGPictureHeap.clear();
#endif //HARDWARE_OMX
			}//END of CAMERA_MSG_COMPRESSED_IMAGE
       

 
#ifdef R3D4_CONVERT 
            delete pConvert;  
#endif            

            
		}//END of CAMERA_MODE_YUV
        
		mPictureBuffer.clear();
		mPictureHeap.clear();
        
		if(mCamera_Mode == CAMERA_MODE_JPEG)
		{
			mFinalPictureBuffer.clear();
			mFinalPictureHeap.clear();

		}
         
        mYUVPictureBuffer.clear();
        mYUVPictureHeap.clear();
        
		delete []pExifBuf;
		mCaptureFlag = false;
                
		LOG_FUNCTION_NAME_EXIT

		return NO_ERROR;

	}
Exemple #15
0
static void mix_MultiPosition(u8 action) {
    s8 val;

    if (action == MLA_CHG) {
	// change value
	if (menu_set == 0) {
	    // channel number/off
	    val = cm.channel_MP;
	    if (!val)  val = 2;
	    else if (val == MP_DIG)  val = (s8)(channels + 1);
	    val = (u8)menu_change_val(val, 2, channels + 1, 1, 1);
	    if (val == 2)   			cm.channel_MP = 0;
	    else if (val == (s8)(channels + 1))	cm.channel_MP = MP_DIG;
	    else	    			cm.channel_MP = val;
	}
	else {
	    // position value + END state (END not for first position)
	    val = cm.multi_position[menu_set - 1];
	    if (val == MULTI_POSITION_END)  val = -101;
	    val = (s8)menu_change_val(val, menu_set == 1 ? -100 : -101, 100,
				      CHANNEL_FAST, 0);
	    if (val == -101) {
		// set all from this to END value
		memset(&cm.multi_position[menu_set - 1], (u8)MULTI_POSITION_END,
		       NUM_MULTI_POSITION + 1 - menu_set);
	    }
	    else cm.multi_position[menu_set - 1] = val;
	}
    }
    else if (action == MLA_NEXT) {
	// select next value
	if (cm.channel_MP) {
	    if (menu_set == 0)  menu_set = 1;
	    else if (cm.multi_position[menu_set - 1] == MULTI_POSITION_END
		    || ++menu_set > NUM_MULTI_POSITION)  menu_set = 0;
	}
	// allow forcing channel value
	if (menu_set && cm.channel_MP && cm.channel_MP <= channels) {
	    menu_force_value_channel = cm.channel_MP;
	}
	else menu_force_value_channel = 0;
    }

    // show value
    lcd_7seg(L7_P);
    if (menu_set == 0) {
	// channel number/OFF
	if (!cm.channel_MP)	lcd_chars("OFF");
	else if (cm.channel_MP == MP_DIG)
				lcd_chars("DIG");
	else			lcd_char_num3(cm.channel_MP);
	lcd_segment(LS_SYM_CHANNEL, LS_ON);
    }
    else {
	// position value
	val = cm.multi_position[menu_set - 1];
	if (val == MULTI_POSITION_END) {
	    lcd_chars("END");
	    val = -100;
	}
	else  lcd_char_num3(val);
	if (cm.channel_MP == MP_DIG)	menu_DIG_mix = val;
	else				menu_force_value = val * PPM(5);
    }
}