/* Threaded IRQ handler: fetch one frame from the BU21150 and hand it to
 * any waiter, unless the interrupt was raised by a reset. */
static irqreturn_t bu21150_irq_thread(int irq, void *dev_id)
{
	struct bu21150_data *ts = dev_id;
	u8 *buf = (u8 *)ts->frame_work;

	/* Keep the device awake while a gesture (self-cap) scan delivers. */
	mutex_lock(&ts->mutex_wake);
	if (ts->wake_up && !ts->stay_awake &&
			ts->scan_mode == AFE_SCAN_GESTURE_SELF_CAP) {
		pm_stay_awake(&ts->client->dev);
		ts->stay_awake = true;
	}
	mutex_unlock(&ts->mutex_wake);

	/* Read the pending frame out of the controller. */
	ts->frame_work_get = ts->req_get;
	bu21150_read_register(REG_READ_DATA, ts->frame_work_get.size, buf);

	if (ts->reset_flag) {
		/* Frame produced by a reset: discard it, clear the flag. */
		ts->reset_flag = 0;
	} else {
#ifdef CHECK_SAME_FRAME
		check_same_frame(ts);
#endif
		copy_frame(ts);
		wake_up_frame_waitq(ts);
	}

	return IRQ_HANDLED;
}
Beispiel #2
0
/* Render one frame: clear, position the camera, draw the selected
 * primitive (plus the cursor in manual mode), then present. */
static void redraw(void)
{
	float angle = get_msec() / 10.0;

	mgl_clear(0);
	mgl_clear_depth();

	/* Modelview: dolly back by cam_zoom, then rotate — either an
	 * automatic time-based spin or the user-controlled orientation. */
	mgl_matrix_mode(MGL_MODELVIEW);
	mgl_load_identity();
	mgl_translate(0, 0, -cam_zoom);
	if(auto_rotate) {
		mgl_rotate(angle * 0.5, 1, 0, 0);
		mgl_rotate(angle, 0, 0, 1);
	} else {
		mgl_rotate(cam_phi, 1, 0, 0);
		mgl_rotate(cam_theta, 0, 1, 0);
	}

	/* Each primitive draws with its own palette base index. */
	switch(prim) {
	case TORUS:
		mgl_index(green_base);
		mgl_torus(1.0, 0.25, 16, 8);
		break;
	case SPHERE:
		mgl_index(blue_base);
		mgl_sphere(1.0, 16, 8);
		break;
	case CUBE:
		mgl_index(red_base);
		mgl_cube(1.0);
	}

	/* The software cursor only matters when the user drives the camera. */
	if(!auto_rotate) {
		draw_cursor(fbuf, 320, 200, mx, my, white_base + grad_range - 1);
	}

	copy_frame(fbuf);
	if(use_vsync) {
		wait_vsync();
	}
	num_frm++;
}
/*
 * Fetch one decoded picture (or field) from the CrystalHD driver.
 *
 * data/data_size receive the output frame; *data_size stays 0 when no
 * displayable frame was produced by this call. second_field is passed
 * through to copy_frame() and is non-zero when retrieving the second
 * field of an interlaced pair.
 *
 * Returns a CopyRet: RET_COPY_AGAIN when the caller should call again
 * (format change handled, driver busy, or a duplicate packed frame was
 * dropped), RET_ERROR on driver failure, otherwise copy_frame()'s
 * result.
 */
static inline CopyRet receive_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    uint8_t second_field)
{
	BC_STATUS ret;
	BC_DTS_PROC_OUT output =
	{
		.PicInfo.width  = avctx->width,
		.PicInfo.height = avctx->height,
	};
	CHDContext *priv = avctx->priv_data;
	HANDLE dev       = priv->dev;

	*data_size = 0;

	// Request decoded data from the driver
	ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output);
	if (ret == BC_STS_FMT_CHANGE)
	{
		/* Adopt the new dimensions and ask the caller to retry for the
		 * actual frame. */
		av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Initial format change\n");
		avctx->width  = output.PicInfo.width;
		avctx->height = output.PicInfo.height;
		return RET_COPY_AGAIN;
	}
	else if (ret == BC_STS_SUCCESS)
	{
		int copy_ret = -1;
		if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID)
		{
			if (priv->last_picture == -1)
			{
				/*
				 * Init to one less, so that the incrementing code doesn't
				 * need to be special-cased.
				 */
				priv->last_picture = output.PicInfo.picture_number - 1;
			}

			/* Packed b-frames: the hardware returns the packed p-frame
			 * twice for MPEG4, the first copy with a zero timestamp —
			 * drop that one (see the RET_COPY_AGAIN note in decode()). */
			if (avctx->codec->id == CODEC_ID_MPEG4 &&
			        output.PicInfo.timeStamp == 0)
			{
				av_log(avctx, AV_LOG_VERBOSE,
				       "CrystalHD: Not returning packed frame twice.\n");
				priv->last_picture++;
				DtsReleaseOutputBuffs(dev, NULL, FALSE);
				return RET_COPY_AGAIN;
			}

			print_frame_info(priv, &output);

			if (priv->last_picture + 1 < output.PicInfo.picture_number)
			{
				av_log(avctx, AV_LOG_WARNING,
				       "CrystalHD: Picture Number discontinuity\n");
				/*
				 * Have we lost frames? If so, we need to shrink the
				 * pipeline length appropriately.
				 *
				 * XXX: I have no idea what the semantics of this situation
				 * are so I don't even know if we've lost frames or which
				 * ones.
				 *
				 * In any case, only warn the first time.
				 */
				priv->last_picture = output.PicInfo.picture_number - 1;
			}

			copy_ret = copy_frame(avctx, &output, data, data_size, second_field);
			if (*data_size > 0)
			{
				/* A frame left the hardware: shrink the tracked pipeline
				 * depth (has_b_frames doubles as that counter here). */
				avctx->has_b_frames--;
				priv->last_picture++;
				av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Pipeline length: %u\n",
				       avctx->has_b_frames);
			}
		}
		else
		{
			/*
			 * An invalid frame has been consumed.
			 */
			av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
			       "invalid PIB\n");
			avctx->has_b_frames--;
			copy_ret = RET_OK;
		}
		DtsReleaseOutputBuffs(dev, NULL, FALSE);

		return copy_ret;
	}
	else if (ret == BC_STS_BUSY)
	{
		return RET_COPY_AGAIN;
	}
	else
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret);
		return RET_ERROR;
	}
}


/*
 * Old-style AVCodec decode callback for CrystalHD.
 *
 * Feeds the packet into the hardware TX fifo (when there is room), then,
 * once the pipeline is primed, pulls decoded frames back out through
 * receive_frame(). avctx->has_b_frames is used throughout as the
 * pipeline-depth counter (incremented on input, decremented on output).
 *
 * Returns the number of input bytes consumed (possibly 0 when the input
 * buffer was full), or a negative error code.
 */
static int decode(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
	BC_STATUS ret;
	BC_DTS_STATUS decoder_status;
	CopyRet rec_ret;
	CHDContext *priv   = avctx->priv_data;
	HANDLE dev         = priv->dev;
	int len            = avpkt->size;

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");

	if (len)
	{
		int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
		/* Keep a safety margin in the TX fifo rather than filling it. */
		if (len < tx_free - 1024)
		{
			/*
			 * Despite being notionally opaque, either libcrystalhd or
			 * the hardware itself will mangle pts values that are too
			 * small or too large. The docs claim it should be in units
			 * of 100ns. Given that we're nominally dealing with a black
			 * box on both sides, any transform we do has no guarantee of
			 * avoiding mangling so we need to build a mapping to values
			 * we know will not be mangled.
			 */
			uint64_t pts = opaque_list_push(priv, avctx->pkt->pts);
			if (!pts)
			{
				return AVERROR(ENOMEM);
			}
			av_log(priv->avctx, AV_LOG_VERBOSE,
			       "input \"pts\": %"PRIu64"\n", pts);
			ret = DtsProcInput(dev, avpkt->data, len, pts, 0);
			if (ret == BC_STS_BUSY)
			{
				av_log(avctx, AV_LOG_WARNING,
				       "CrystalHD: ProcInput returned busy\n");
				usleep(BASE_WAIT);
				return AVERROR(EBUSY);
			}
			else if (ret != BC_STS_SUCCESS)
			{
				av_log(avctx, AV_LOG_ERROR,
				       "CrystalHD: ProcInput failed: %u\n", ret);
				return -1;
			}
			/* One more frame in flight. */
			avctx->has_b_frames++;
		}
		else
		{
			av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n");
			len = 0; // We didn't consume any bytes.
		}
	}
	else
	{
		av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
	}

	/* Set by a previous call that returned RET_SKIP_NEXT_COPY: two input
	 * packets became one field pair, so produce no output this round. */
	if (priv->skip_next_output)
	{
		av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n");
		priv->skip_next_output = 0;
		avctx->has_b_frames--;
		return len;
	}

	ret = DtsGetDriverStatus(dev, &decoder_status);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
		return -1;
	}

	/*
	 * No frames ready. Don't try to extract.
	 *
	 * Empirical testing shows that ReadyListCount can be a damn lie,
	 * and ProcOut still fails when count > 0. The same testing showed
	 * that two more iterations were needed before ProcOutput would
	 * succeed.
	 */
	if (priv->output_ready < 2)
	{
		if (decoder_status.ReadyListCount != 0)
			priv->output_ready++;
		usleep(BASE_WAIT);
		av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
		return len;
	}
	else if (decoder_status.ReadyListCount == 0)
	{
		/*
		 * After the pipeline is established, if we encounter a lack of frames
		 * that probably means we're not giving the hardware enough time to
		 * decode them, so start increasing the wait time at the end of a
		 * decode call.
		 */
		usleep(BASE_WAIT);
		priv->decode_wait += WAIT_UNIT;
		av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
		return len;
	}

	do
	{
		rec_ret = receive_frame(avctx, data, data_size, 0);
		if (rec_ret == 0 && *data_size == 0)
		{
			if (avctx->codec->id == CODEC_ID_H264)
			{
				/*
				 * This case is for when the encoded fields are stored
				 * separately and we get a separate avpkt for each one. To keep
				 * the pipeline stable, we should return nothing and wait for
				 * the next time round to grab the second field.
				 * H.264 PAFF is an example of this.
				 */
				av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
				avctx->has_b_frames--;
			}
			else
			{
				/*
				 * This case is for when the encoded fields are stored in a
				 * single avpkt but the hardware returns then separately. Unless
				 * we grab the second field before returning, we'll slip another
				 * frame in the pipeline and if that happens a lot, we're sunk.
				 * So we have to get that second field now.
				 * Interlaced mpeg2 and vc1 are examples of this.
				 */
				av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
				/* Spin (with decode_wait sleeps) until the second field
				 * arrives or receive_frame() reports an error. */
				while (1)
				{
					usleep(priv->decode_wait);
					ret = DtsGetDriverStatus(dev, &decoder_status);
					if (ret == BC_STS_SUCCESS &&
					        decoder_status.ReadyListCount > 0)
					{
						rec_ret = receive_frame(avctx, data, data_size, 1);
						if ((rec_ret == 0 && *data_size > 0) ||
						        rec_ret == RET_ERROR)
							break;
					}
				}
				av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n");
			}
		}
		else if (rec_ret == RET_SKIP_NEXT_COPY)
		{
			/*
			 * Two input packets got turned into a field pair. Gawd.
			 */
			av_log(avctx, AV_LOG_VERBOSE,
			       "Don't output on next decode call.\n");
			priv->skip_next_output = 1;
		}
		/*
		 * If rec_ret == RET_COPY_AGAIN, that means that either we just handled
		 * a FMT_CHANGE event and need to go around again for the actual frame,
		 * we got a busy status and need to try again, or we're dealing with
		 * packed b-frames, where the hardware strangely returns the packed
		 * p-frame twice. We choose to keep the second copy as it carries the
		 * valid pts.
		 */
	}
	while (rec_ret == RET_COPY_AGAIN);
	usleep(priv->decode_wait);
	return len;
}
Beispiel #4
0
/*
 * NUV video decoder (old-style decode callback).
 *
 * Handles 'DR' codec-data packets (RTJpeg quant tables) as well as 'V'
 * video frames in all supported compression flavours (uncompressed,
 * RTJpeg, either of them LZO-wrapped, black frame, copy-last).
 *
 * Returns the number of input bytes consumed, or -1 on error.
 *
 * Fixes vs. the previous revision:
 *  - an LZO decompression failure now aborts the frame instead of
 *    decoding the undefined contents of decomp_buf;
 *  - a severely truncated uncompressed frame can yield height <= 0,
 *    which is now skipped instead of being passed to copy_frame().
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    NuvContext *c = avctx->priv_data;
    AVFrame *picture = data;
    int orig_size = buf_size;
    int keyframe;
    int result;
    enum {NUV_UNCOMPRESSED = '0', NUV_RTJPEG = '1',
          NUV_RTJPEG_IN_LZO = '2', NUV_LZO = '3',
          NUV_BLACK = 'N', NUV_COPY_LAST = 'L'} comptype;

    if (buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
        return -1;
    }

    // codec data (rtjpeg quant tables)
    if (buf[0] == 'D' && buf[1] == 'R') {
        int ret;
        // skip rest of the frameheader.
        buf = &buf[12];
        buf_size -= 12;
        ret = get_quant(avctx, c, buf, buf_size);
        if (ret < 0)
            return ret;
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
        return orig_size;
    }

    if (buf[0] != 'V' || buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "not a nuv video frame\n");
        return -1;
    }
    comptype = buf[1];
    // Only RTJpeg variants carry an explicit keyframe flag; a copy of the
    // previous frame is never a keyframe, everything else always is.
    switch (comptype) {
        case NUV_RTJPEG_IN_LZO:
        case NUV_RTJPEG:
            keyframe = !buf[2]; break;
        case NUV_COPY_LAST:
            keyframe = 0; break;
        default:
            keyframe = 1; break;
    }
    // skip rest of the frameheader.
    buf = &buf[12];
    buf_size -= 12;
    if (comptype == NUV_RTJPEG_IN_LZO || comptype == NUV_LZO) {
        int outlen = c->decomp_size, inlen = buf_size;
        if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen)) {
            // decomp_buf contents are undefined on failure; don't decode
            // garbage (matches the behavior of the newer decoder below).
            av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n");
            return -1;
        }
        buf = c->decomp_buf;
        buf_size = c->decomp_size;
    }
    if (c->codec_frameheader) {
        // Secondary per-frame header carrying width/height/quality.
        int w, h, q;
        if (buf_size < 12) {
            av_log(avctx, AV_LOG_ERROR, "invalid nuv video frame\n");
            return -1;
        }
        w = AV_RL16(&buf[6]);
        h = AV_RL16(&buf[8]);
        q = buf[10];
        if (!codec_reinit(avctx, w, h, q))
            return -1;
        buf = &buf[12];
        buf_size -= 12;
    }

    if (keyframe && c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);
    c->pic.reference = 3;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_READABLE |
                          FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    result = avctx->reget_buffer(avctx, &c->pic);
    if (result < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    c->pic.pict_type = keyframe ? FF_I_TYPE : FF_P_TYPE;
    c->pic.key_frame = keyframe;
    // decompress/copy/whatever data
    switch (comptype) {
        case NUV_LZO:
        case NUV_UNCOMPRESSED: {
            int height = c->height;
            if (buf_size < c->width * height * 3 / 2) {
                av_log(avctx, AV_LOG_ERROR, "uncompressed frame too short\n");
                height = buf_size / c->width / 3 * 2;
            }
            // A truncated packet can yield height <= 0: nothing to copy.
            if (height > 0)
                copy_frame(&c->pic, buf, c->width, height);
            break;
        }
        case NUV_RTJPEG_IN_LZO:
        case NUV_RTJPEG: {
            rtjpeg_decode_frame_yuv420(&c->rtj, &c->pic, buf, buf_size);
            break;
        }
        case NUV_BLACK: {
            // YUV420: luma to 0, both chroma planes to neutral 128.
            memset(c->pic.data[0], 0, c->width * c->height);
            memset(c->pic.data[1], 128, c->width * c->height / 4);
            memset(c->pic.data[2], 128, c->width * c->height / 4);
            break;
        }
        case NUV_COPY_LAST: {
            /* nothing more to do here */
            break;
        }
        default:
            av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
            return -1;
    }

    *picture = c->pic;
    *data_size = sizeof(AVFrame);
    return orig_size;
}
Beispiel #5
0
/*
 * NUV video decoder (refcounted-AVFrame decode callback).
 *
 * Handles 'DR' codec-data packets (RTJpeg quant tables) and 'V' video
 * frames (uncompressed, RTJpeg, either LZO-wrapped, black, copy-last).
 * On a mid-stream size change signalled by the secondary frame header,
 * the codec is reinitialized and parsing restarts from the packet start
 * (the `retry` label).
 *
 * Returns the number of input bytes consumed, or a negative AVERROR.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    NuvContext *c      = avctx->priv_data;
    AVFrame *picture   = data;
    int orig_size      = buf_size;
    int keyframe, ret;
    int size_change = 0;
    int result, init_frame = !avctx->frame_number;
    enum {
        NUV_UNCOMPRESSED  = '0',
        NUV_RTJPEG        = '1',
        NUV_RTJPEG_IN_LZO = '2',
        NUV_LZO           = '3',
        NUV_BLACK         = 'N',
        NUV_COPY_LAST     = 'L'
    } comptype;

    if (buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
        return AVERROR_INVALIDDATA;
    }

    // codec data (rtjpeg quant tables)
    if (buf[0] == 'D' && buf[1] == 'R') {
        int ret;
        // skip rest of the frameheader.
        buf       = &buf[12];
        buf_size -= 12;
        ret       = get_quant(avctx, c, buf, buf_size);
        if (ret < 0)
            return ret;
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);
        return orig_size;
    }

    if (buf_size < 12 || buf[0] != 'V') {
        av_log(avctx, AV_LOG_ERROR, "not a nuv video frame\n");
        return AVERROR_INVALIDDATA;
    }
    comptype = buf[1];
    // Only RTJpeg variants carry an explicit keyframe flag; a copy of the
    // previous frame is never a keyframe, everything else always is.
    switch (comptype) {
    case NUV_RTJPEG_IN_LZO:
    case NUV_RTJPEG:
        keyframe = !buf[2];
        break;
    case NUV_COPY_LAST:
        keyframe = 0;
        break;
    default:
        keyframe = 1;
        break;
    }
retry:
    // skip rest of the frameheader.
    buf       = &buf[12];
    buf_size -= 12;
    if (comptype == NUV_RTJPEG_IN_LZO || comptype == NUV_LZO) {
        // Reserve padding at the end of decomp_buf; av_lzo1x_decode
        // returns the unused remainder in outlen, so the decoded length
        // is (available - outlen).
        int outlen = c->decomp_size - FFMAX(FF_INPUT_BUFFER_PADDING_SIZE, AV_LZO_OUTPUT_PADDING);
        int inlen  = buf_size;
        if (av_lzo1x_decode(c->decomp_buf, &outlen, buf, &inlen)) {
            av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n");
            return AVERROR_INVALIDDATA;
        }
        buf      = c->decomp_buf;
        buf_size = c->decomp_size - FFMAX(FF_INPUT_BUFFER_PADDING_SIZE, AV_LZO_OUTPUT_PADDING) - outlen;
        // Zero the padding so downstream parsers never read stale bytes.
        memset(c->decomp_buf + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    }
    if (c->codec_frameheader) {
        int w, h, q;
        if (buf_size < RTJPEG_HEADER_SIZE) {
            av_log(avctx, AV_LOG_ERROR, "Too small NUV video frame\n");
            return AVERROR_INVALIDDATA;
        }
        // There seem to exist two variants of this header: one starts with 'V'
        // and 5 bytes unknown, the other matches current MythTV and is 4 bytes size,
        // 1 byte header size (== 12), 1 byte version (== 0)
        if (buf[0] != 'V' && AV_RL16(&buf[4]) != 0x000c) {
            av_log(avctx, AV_LOG_ERROR, "Unknown secondary frame header (wrong codec_tag?)\n");
            return AVERROR_INVALIDDATA;
        }
        w = AV_RL16(&buf[6]);
        h = AV_RL16(&buf[8]);
        q = buf[10];
        if ((result = codec_reinit(avctx, w, h, q)) < 0)
            return result;
        if (result) {
            // Dimensions changed: reparse the whole packet with the
            // reinitialized codec state.
            buf = avpkt->data;
            buf_size = avpkt->size;
            size_change = 1;
            goto retry;
        }
        buf       = &buf[RTJPEG_HEADER_SIZE];
        buf_size -= RTJPEG_HEADER_SIZE;
    }

    if (size_change || keyframe) {
        av_frame_unref(c->pic);
        init_frame = 1;
    }

    if ((result = ff_reget_buffer(avctx, c->pic)) < 0)
        return result;
    if (init_frame) {
        // Start from a defined (black) picture for the very first frame
        // or after a size change / keyframe reset.
        memset(c->pic->data[0], 0,    avctx->height * c->pic->linesize[0]);
        memset(c->pic->data[1], 0x80, avctx->height * c->pic->linesize[1] / 2);
        memset(c->pic->data[2], 0x80, avctx->height * c->pic->linesize[2] / 2);
    }

    c->pic->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    c->pic->key_frame = keyframe;
    // decompress/copy/whatever data
    switch (comptype) {
    case NUV_LZO:
    case NUV_UNCOMPRESSED: {
        int height = c->height;
        if (buf_size < c->width * height * 3 / 2) {
            av_log(avctx, AV_LOG_ERROR, "uncompressed frame too short\n");
            height = buf_size / c->width / 3 * 2;
        }
        if(height > 0)
            copy_frame(c->pic, buf, c->width, height);
        break;
    }
    case NUV_RTJPEG_IN_LZO:
    case NUV_RTJPEG:
        ret = ff_rtjpeg_decode_frame_yuv420(&c->rtj, c->pic, buf, buf_size);
        if (ret < 0)
            return ret;
        break;
    case NUV_BLACK:
        // YUV420: luma to 0, both chroma planes to neutral 128.
        memset(c->pic->data[0], 0, c->width * c->height);
        memset(c->pic->data[1], 128, c->width * c->height / 4);
        memset(c->pic->data[2], 128, c->width * c->height / 4);
        break;
    case NUV_COPY_LAST:
        /* nothing more to do here */
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "unknown compression\n");
        return AVERROR_INVALIDDATA;
    }

    if ((result = av_frame_ref(picture, c->pic)) < 0)
        return result;

    *got_frame = 1;
    return orig_size;
}
/*
 * Fetch the decoded picture for output frame `frame_number` into
 * vdhp->frame_buffer, applying repeat-field (RFF) control when enabled.
 *
 * With repeat control on, an output frame is defined by a top and a
 * bottom field number taken from vohp->frame_order_list. A two-slot
 * cache (frame_cache_buffers/frame_cache_numbers) avoids re-decoding
 * source frames shared between consecutive output frames.
 *
 * Returns 0 on success (or the result of copy_frame/copy_field), -1 on
 * decode/copy failure.
 */
int lwlibav_get_video_frame
(
    lwlibav_video_decode_handler_t *vdhp,
    lwlibav_video_output_handler_t *vohp,
    uint32_t                        frame_number
)
{
    if( !vohp->repeat_control )
        return get_requested_picture( vdhp, vdhp->frame_buffer, frame_number );
    /* Get picture to applied the repeat control. */
    uint32_t t = vohp->frame_order_list[frame_number].top;
    uint32_t b = vohp->frame_order_list[frame_number].bottom;
    uint32_t first_field_number  = MIN( t, b );
    uint32_t second_field_number = MAX( t, b );
    /* Check repeat targets and cache datas. */
    enum
    {
        REPEAT_CONTROL_COPIED_FROM_CACHE   = 0x00,
        REPEAT_CONTROL_DECODE_TOP_FIELD    = 0x01,
        REPEAT_CONTROL_DECODE_BOTTOM_FIELD = 0x02,
        REPEAT_CONTROL_DECODE_BOTH_FIELDS  = 0x03,  /* REPEAT_CONTROL_DECODE_TOP_FIELD | REPEAT_CONTROL_DECODE_BOTTOM_FIELD */
        REPEAT_CONTROL_DECODE_ONE_FRAME    = 0x04
    };
    int repeat_control;
    if( first_field_number == second_field_number )
    {
        /* Both fields come from the same source frame: serve a whole
         * frame, from cache if possible. */
        repeat_control = REPEAT_CONTROL_DECODE_ONE_FRAME;
        if( first_field_number == vohp->frame_cache_numbers[0] )
            return copy_frame( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[0] );
        if( first_field_number == vohp->frame_cache_numbers[1] )
            return copy_frame( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[1] );
        /* NOTE(review): frame_order_list is indexed at frame_number - 1
         * and frame_number + 1 here — presumably the list is padded with
         * sentinel entries at both ends; confirm against the builder. */
        if( first_field_number != vohp->frame_order_list[frame_number - 1].top
         && first_field_number != vohp->frame_order_list[frame_number - 1].bottom
         && first_field_number != vohp->frame_order_list[frame_number + 1].top
         && first_field_number != vohp->frame_order_list[frame_number + 1].bottom )
        {
            /* Source frame is not shared with a neighbouring output
             * frame, so decode straight into the output buffer. */
            if( get_requested_picture( vdhp, vdhp->frame_buffer, first_field_number ) < 0 )
                return -1;
            /* Treat this frame as interlaced. */
            vdhp->frame_buffer->interlaced_frame = 1;
            return 0;
        }
    }
    else
    {
        /* Fields come from two source frames: copy whichever fields are
         * already cached and clear their bits from repeat_control. */
        repeat_control = REPEAT_CONTROL_DECODE_BOTH_FIELDS;
        for( int i = 0; i < REPEAT_CONTROL_CACHE_NUM; i++ )
        {
            if( t == vohp->frame_cache_numbers[i] )
            {
                if( copy_field( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[i], 0 ) < 0 )
                    return -1;
                repeat_control &= ~REPEAT_CONTROL_DECODE_TOP_FIELD;
            }
            if( b == vohp->frame_cache_numbers[i] )
            {
                if( copy_field( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[i], 1 ) < 0 )
                    return -1;
                repeat_control &= ~REPEAT_CONTROL_DECODE_BOTTOM_FIELD;
            }
        }
        if( repeat_control == REPEAT_CONTROL_COPIED_FROM_CACHE )
            return 0;
    }
    /* Decode target frames and copy to output buffer. */
    if( repeat_control == REPEAT_CONTROL_DECODE_BOTH_FIELDS )
    {
        /* Decode 2 frames, and copy each a top and bottom fields. */
        if( get_requested_picture( vdhp, vohp->frame_cache_buffers[0], first_field_number ) < 0 )
            return -1;
        vohp->frame_cache_numbers[0] = first_field_number;
        if( get_requested_picture( vdhp, vohp->frame_cache_buffers[1], second_field_number ) < 0 )
            return -1;
        vohp->frame_cache_numbers[1] = second_field_number;
        /* If both source frames are pixel-identical, a plain frame copy
         * suffices (and avoids field weaving). */
        if( check_frame_buffer_identical( vohp->frame_cache_buffers[0], vohp->frame_cache_buffers[1] ) )
            return copy_frame( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[0] );
        if( copy_field( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[0], t > b ? 1 : 0 ) < 0
         || copy_field( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[1], t < b ? 1 : 0 ) < 0 )
            return -1;
        return 0;
    }
    else
    {
        /* Decode 1 frame, and copy 1 frame or 1 field. */
        int decode_number = repeat_control == REPEAT_CONTROL_DECODE_ONE_FRAME ? first_field_number
                          : repeat_control == REPEAT_CONTROL_DECODE_TOP_FIELD ? t : b;
        /* Evict the cache slot holding the higher (older for forward
         * playback) frame number. */
        int idx = vohp->frame_cache_numbers[0] > vohp->frame_cache_numbers[1] ? 1 : 0;
        if( get_requested_picture( vdhp, vohp->frame_cache_buffers[idx], decode_number ) < 0 )
            return -1;
        vohp->frame_cache_numbers[idx] = decode_number;
        if( repeat_control == REPEAT_CONTROL_DECODE_ONE_FRAME )
            return copy_frame( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[idx] );
        else
            return copy_field( &vdhp->lh, vdhp->frame_buffer, vohp->frame_cache_buffers[idx],
                               repeat_control == REPEAT_CONTROL_DECODE_TOP_FIELD ? 0 : 1 );
    }
}
Beispiel #7
0
int
upet_to_minc(char *hdr_fname, char *img_fname, char *out_fname, 
             char *prog_name)
{
    char *line_ptr;
    char line_buf[1024];
    char *val_ptr;
    int in_header;
    double dbl_tmp;
    int int_tmp;
    struct conversion_info ci;
    struct keywd_entry *ke_ptr;
    int is_known;
    char *argv_tmp[5];
    char *out_history;

    ci.hdr_fp = fopen(hdr_fname, "r"); /* Text file */
    if (ci.hdr_fp == NULL) {
        perror(hdr_fname);
        return (-1);
    }

    ci.img_fp = fopen(img_fname, "rb"); /* Binary file */
    if (ci.img_fp == NULL) {
        perror(img_fname);
        return (-1);
    }

    ci.mnc_fd = micreate(out_fname, NC_NOCLOBBER);
    if (ci.mnc_fd < 0) {
        perror(out_fname);
        return (-1);
    }

    ci.frame_zero = -1;     /* Initial frame is -1 until set. */

    /* Define the basic MINC group variables.
     */
    micreate_group_variable(ci.mnc_fd, MIstudy);
    micreate_group_variable(ci.mnc_fd, MIacquisition);
    micreate_group_variable(ci.mnc_fd, MIpatient);
    ncvardef(ci.mnc_fd, "micropet", NC_SHORT, 0, NULL);

    /* Fake the history here */
    argv_tmp[0] = prog_name;
    argv_tmp[1] = VERSIONSTR;
    argv_tmp[2] = hdr_fname;
    argv_tmp[3] = img_fname;
    argv_tmp[4] = out_fname;

    out_history = time_stamp(5, argv_tmp);

    miattputstr(ci.mnc_fd, NC_GLOBAL, MIhistory, out_history);
    free(out_history);
        
    in_header = 1;

    ci.frame_nbytes = 1;
    ci.frame_nvoxels = 1;

    /* When we read voxels, we need COMBINED_SCALE_FACTOR() to have a sane
     * value for all modalities. Set defaults for these in case the modality
     * does not define one of these factors. For example, a CT (modality 2)
     * will not define isotope_branching_fraction or calibration_factor.
     */

    ci.scale_factor = 1.0;
    ci.calibration_factor = 1.0;
    ci.isotope_branching_fraction = 1.0;

    /* Collect the headers */
    while (fgets(line_buf, sizeof(line_buf), ci.hdr_fp) != NULL) {
        if (line_buf[0] == '#') /*  */
            continue;
        line_ptr = line_buf;
        while (!isspace(*line_ptr)) {
            line_ptr++;
        }
        *line_ptr++ = '\0';
        val_ptr = line_ptr;
        while (*line_ptr != '\n' && *line_ptr != '\r' && *line_ptr != '\0') {
            line_ptr++;
        }
        *line_ptr = '\0';
            
        is_known = 0;

        if (in_header) {
            if (*val_ptr != '\0') {
                /* Save the raw attribute into the file */
                ncattput(ci.mnc_fd, ncvarid(ci.mnc_fd, "micropet"),
                         line_buf, NC_CHAR, strlen(val_ptr), val_ptr);
            }

            for (ke_ptr = vol_atts; ke_ptr->upet_kwd != NULL; ke_ptr++) {
                if (!strcmp(ke_ptr->upet_kwd, line_buf)) {
                    
                    is_known = 1;

                    if (ke_ptr->func != NULL) {
                        (*ke_ptr->func)(&ci, val_ptr, 
                                        ke_ptr->mnc_var,
                                        ke_ptr->mnc_att);
                    }
                    else if (ke_ptr->mnc_var != NULL &&
                             ke_ptr->mnc_att != NULL) {

                        /* Interpret based upon type */
                        switch (ke_ptr->upet_type) {
                        case UPET_TYPE_INT:
                            int_tmp = atoi(val_ptr);
                            miattputint(ci.mnc_fd, 
                                        ncvarid(ci.mnc_fd, ke_ptr->mnc_var),
                                        ke_ptr->mnc_att,
                                        int_tmp);
                            break;

                        case UPET_TYPE_REAL:
                            dbl_tmp = atof(val_ptr);
                            miattputdbl(ci.mnc_fd, 
                                        ncvarid(ci.mnc_fd, ke_ptr->mnc_var),
                                        ke_ptr->mnc_att,
                                        dbl_tmp);
                            break;

                        case UPET_TYPE_STR:
                            miattputstr(ci.mnc_fd, 
                                        ncvarid(ci.mnc_fd, ke_ptr->mnc_var),
                                        ke_ptr->mnc_att,
                                        val_ptr);
                            break;

                        }
                        
                    }
                    break;
                }
            }
        }
        else {
            /* Not in the header any longer 
             */
            for (ke_ptr = frm_atts; ke_ptr->upet_kwd != NULL; ke_ptr++) {
                if (!strcmp(ke_ptr->upet_kwd, line_buf)) {
                    
                    is_known = 1;
                    
                    if (ke_ptr->func != NULL) {
                        (*ke_ptr->func)(&ci, val_ptr, 
                                        ke_ptr->mnc_var,
                                        ke_ptr->mnc_att);
                    }
                    break;
                }
            }
        }

        if (!is_known) {
            if (!strcmp(line_buf, "end_of_header")) {
                if (in_header) {
                    in_header = 0;

                    copy_init(&ci);

                }
                else {
                    copy_frame(&ci);
                }
            }
            else {
                message(MSG_WARNING, "Unrecognized keyword %s\n", line_buf);
            }
        }
    }

    fclose(ci.hdr_fp);
    fclose(ci.img_fp);
    miclose(ci.mnc_fd);
    return (0);
}