/**
 * ffmpeg_open
 *      Opens an mpeg file using the new libavformat method. Both mpeg1
 *      and mpeg4 are supported. However, if the current ffmpeg version doesn't allow
 *      mpeg1 with non-standard framerate, the open will fail. Timelapse is a special
 *      case and is tested separately.
 *
 * Returns
 *      A new allocated ffmpeg struct or NULL if any error happens.
 */
struct ffmpeg *ffmpeg_open(char *ffmpeg_video_codec, char *filename,
                           unsigned char *y, unsigned char *u, unsigned char *v,
                           int width, int height, int rate, int bps, int vbr)
{
    AVCodecContext *c;
    AVCodec *codec;
    struct ffmpeg *ffmpeg;
    int is_mpeg1;
    int ret;

    /*
     * Allocate space for our ffmpeg structure. This structure contains all the
     * codec and image information we need to generate movies.
     * FIXME when motion exits we should close the movie to ensure that
     * ffmpeg is freed.
     */
    ffmpeg = mymalloc(sizeof(struct ffmpeg));
    memset(ffmpeg, 0, sizeof(struct ffmpeg));

    ffmpeg->vbr = vbr;

    /* Store codec name in ffmpeg->codec, with buffer overflow check. */
    snprintf(ffmpeg->codec, sizeof(ffmpeg->codec), "%s", ffmpeg_video_codec);

    /* Allocate the output media context. */
#ifdef have_avformat_alloc_context
    ffmpeg->oc = avformat_alloc_context();
#elif defined have_av_avformat_alloc_context
    ffmpeg->oc = av_alloc_format_context();
#else
    ffmpeg->oc = av_mallocz(sizeof(AVFormatContext));
#endif

    if (!ffmpeg->oc) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Memory error while allocating"
                   " output media context");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Setup output format */
    ffmpeg->oc->oformat = get_oformat(ffmpeg_video_codec, filename);
    if (!ffmpeg->oc->oformat) {
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    snprintf(ffmpeg->oc->filename, sizeof(ffmpeg->oc->filename), "%s", filename);

    /* Create a new video stream and initialize the codecs. */
    ffmpeg->video_st = NULL;
    if (ffmpeg->oc->oformat->video_codec != CODEC_ID_NONE) {
#if defined FF_API_NEW_AVIO
        ffmpeg->video_st = avformat_new_stream(ffmpeg->oc, NULL /* Codec */);
#else
        ffmpeg->video_st = av_new_stream(ffmpeg->oc, 0);
#endif
        if (!ffmpeg->video_st) {
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: av_new_stream - could"
                       " not alloc stream");
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }
    } else {
        /* We did not get a proper video codec. */
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Failed to obtain a proper"
                   " video codec");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    ffmpeg->c = c = AVSTREAM_CODEC_PTR(ffmpeg->video_st);
    c->codec_id = ffmpeg->oc->oformat->video_codec;
#if LIBAVCODEC_VERSION_MAJOR < 53
    c->codec_type = CODEC_TYPE_VIDEO;
#else
    c->codec_type = AVMEDIA_TYPE_VIDEO;
#endif
    is_mpeg1 = c->codec_id == CODEC_ID_MPEG1VIDEO;

    if (strcmp(ffmpeg_video_codec, "ffv1") == 0)
        c->strict_std_compliance = -2;

    /* Uncomment to allow non-standard framerates. */
    //c->strict_std_compliance = -1;

    /* Set default parameters */
    c->bit_rate = bps;
    c->width = width;
    c->height = height;
#if LIBAVCODEC_BUILD >= 4754
    /* Frame rate = 1/time_base, so we set 1/rate, not rate/1 */
    c->time_base.num = 1;
    c->time_base.den = rate;
#else
    c->frame_rate = rate;
    c->frame_rate_base = 1;
#endif /* LIBAVCODEC_BUILD >= 4754 */

    MOTION_LOG(INF, TYPE_ENCODER, NO_ERRNO, "%s FPS %d", rate);

    if (vbr)
        c->flags |= CODEC_FLAG_QSCALE;

    /*
     * Set codec specific parameters.
     * Set intra frame distance in frames depending on codec.
     */
    c->gop_size = is_mpeg1 ? 10 : 12;

    /* Some formats want stream headers to be separate. */
    if (!strcmp(ffmpeg->oc->oformat->name, "mp4") ||
        !strcmp(ffmpeg->oc->oformat->name, "mov") ||
        !strcmp(ffmpeg->oc->oformat->name, "3gp")) {
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

#if defined FF_API_NEW_AVIO
    // pass the options to avformat_write_header directly
#else
    /* Set the output parameters (must be done even if no parameters). */
    if (av_set_parameters(ffmpeg->oc, NULL) < 0) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: av_set_parameters error:"
                   " Invalid output format parameters");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }
#endif

    /* Dump the format settings. This shows how the various streams relate to each other. */
    //dump_format(ffmpeg->oc, 0, filename, 1);

    /*
     * Now that all the parameters are set, we can open the video
     * codec and allocate the necessary encode buffers.
     */
    codec = avcodec_find_encoder(c->codec_id);

    if (!codec) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Codec %s not found",
                   ffmpeg_video_codec);
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set the picture format - needed in ffmpeg starting around April-May 2005 */
    c->pix_fmt = PIX_FMT_YUV420P;

    /* Get a mutex lock. */
    pthread_mutex_lock(&global_lock);

    /* Open the codec */
#if defined FF_API_NEW_AVIO
    ret = avcodec_open2(c, codec, NULL /* options */);
#else
    ret = avcodec_open(c, codec);
#endif

    if (ret < 0) {
        /* Release the lock. */
        pthread_mutex_unlock(&global_lock);
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO,
                   "%s: avcodec_open - could not open codec %s",
                   ffmpeg_video_codec);
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Release the lock. */
    pthread_mutex_unlock(&global_lock);

    ffmpeg->video_outbuf = NULL;

    if (!(ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /*
         * Allocate output buffer
         * XXX: API change will be done
         * ffmpeg->video_outbuf_size = 200000
         */
        ffmpeg->video_outbuf_size = ffmpeg->c->width * 512;
        ffmpeg->video_outbuf = mymalloc(ffmpeg->video_outbuf_size);
    }

    /* Allocate the encoded raw picture. */
    ffmpeg->picture = avcodec_alloc_frame();

    if (!ffmpeg->picture) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: avcodec_alloc_frame -"
                   " could not alloc frame");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set variable bitrate if requested. */
    if (ffmpeg->vbr)
        ffmpeg->picture->quality = ffmpeg->vbr;

    /* Set the frame data. */
    ffmpeg->picture->data[0] = y;
    ffmpeg->picture->data[1] = u;
    ffmpeg->picture->data[2] = v;
    ffmpeg->picture->linesize[0] = ffmpeg->c->width;
    ffmpeg->picture->linesize[1] = ffmpeg->c->width / 2;
    ffmpeg->picture->linesize[2] = ffmpeg->c->width / 2;

    /* Open the output file, if needed. */
    if (!(ffmpeg->oc->oformat->flags & AVFMT_NOFILE)) {
        char file_proto[256];

        /*
         * Use append file protocol for mpeg1, to get the append behavior from
         * url_fopen, but no protocol (=> default) for other codecs.
         */
        if (is_mpeg1)
#if defined FF_API_NEW_AVIO
            snprintf(file_proto, sizeof(file_proto), "%s", filename);
#else
            snprintf(file_proto, sizeof(file_proto), APPEND_PROTO ":%s", filename);
#endif
        else
            snprintf(file_proto, sizeof(file_proto), "%s", filename);
static int netcam_read_rtsp_image(netcam_context_ptr netcam)
{
    if (netcam->rtsp == NULL) {
        if (rtsp_connect(netcam) < 0) {
            return -1;
        }
    }

    AVCodecContext *cc = netcam->rtsp->codec_context;
    AVFormatContext *fc = netcam->rtsp->format_context;
    netcam_buff_ptr buffer;

    /* Point to our working buffer. */
    buffer = netcam->receiving;
    buffer->used = 0;

    AVFrame *frame = avcodec_alloc_frame();
    AVPacket packet;

    av_init_packet(&packet);
    packet.data = NULL;
    packet.size = 0;

    int size_decoded = 0;
    static int usual_size_decoded = 0;

    while (size_decoded == 0 && av_read_frame(fc, &packet) >= 0) {
        if (packet.stream_index != netcam->rtsp->video_stream_index) {
            /* Not our packet; release it before skipping. */
            av_free_packet(&packet);
            continue;
        }

        size_decoded = decode_packet(&packet, buffer, frame, cc);
    }

    if (size_decoded == 0) {
        /* Something went wrong -- end of stream? Free the frame before bailing. */
        MOTION_LOG(ERR, TYPE_NETCAM, SHOW_ERRNO, "%s: invalid frame!");
        av_free(frame);
        return -1;
    }

    if (size_decoded != usual_size_decoded) {
        MOTION_LOG(WRN, TYPE_NETCAM, SHOW_ERRNO, "%s: unusual frame size of %d!",
                   size_decoded);
        usual_size_decoded = size_decoded;
    }

    /* At this point we are finished with the packet and frame, so free them. */
    av_free_packet(&packet);
    av_free(frame);

    struct timeval curtime;

    if (gettimeofday(&curtime, NULL) < 0) {
        MOTION_LOG(WRN, TYPE_NETCAM, SHOW_ERRNO, "%s: gettimeofday");
    }

    netcam->receiving->image_time = curtime;

    /*
     * Calculate our "running average" time for this netcam's
     * frame transmissions (except for the first time).
     * Note that the average frame time is held in microseconds.
     */
    if (netcam->last_image.tv_sec) {
        netcam->av_frame_time = ((9.0 * netcam->av_frame_time) +
                                 1000000.0 * (curtime.tv_sec - netcam->last_image.tv_sec) +
                                 (curtime.tv_usec - netcam->last_image.tv_usec)) / 10.0;

        MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO, "%s: Calculated frame time %f",
                   netcam->av_frame_time);
    }

    netcam->last_image = curtime;

    netcam_buff *xchg;

    /*
     * The read is complete - set the current 'receiving' buffer atomically
     * as 'latest', and make the buffer previously in 'latest' become
     * the new 'receiving'.
     */
    pthread_mutex_lock(&netcam->mutex);

    xchg = netcam->latest;
    netcam->latest = netcam->receiving;
    netcam->receiving = xchg;
    netcam->imgcnt++;

    /*
     * We have a new frame ready. We send a signal so that
     * any thread (e.g. the motion main loop) waiting for the
     * next frame to become available may proceed.
     */
    pthread_cond_signal(&netcam->pic_ready);
    pthread_mutex_unlock(&netcam->mutex);

    return 0;
}
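decode_packet() is called above but lies outside this excerpt. A minimal sketch of what a helper with that signature can look like, assuming the avcodec_decode_video2-era API used elsewhere in this file and a netcam_check_buffsize() helper that grows the buffer on demand (both assumptions, not verbatim code from this module):

/* Sketch only: decode one packet; return bytes produced, 0 if still buffered. */
static int decode_packet(AVPacket *packet, netcam_buff_ptr buffer,
                         AVFrame *frame, AVCodecContext *cc)
{
    int check = 0;
    int frame_size;

    if (avcodec_decode_video2(cc, frame, &check, packet) < 0 || check == 0)
        return 0;   /* Decoder produced no picture (yet). */

    frame_size = avpicture_get_size(cc->pix_fmt, cc->width, cc->height);

    /* Assumed helper: make sure the netcam buffer can hold frame_size bytes. */
    netcam_check_buffsize(buffer, frame_size);

    /* Copy the decoded planes into the contiguous working buffer. */
    avpicture_layout((const AVPicture *)frame, cc->pix_fmt, cc->width,
                     cc->height, (unsigned char *)buffer->ptr, frame_size);
    buffer->used = frame_size;

    return frame_size;
}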
/**
 * ffmpeg_put_frame
 *      Encodes and writes a video frame using the av_write_frame API. This is
 *      a helper function for ffmpeg_put_image and ffmpeg_put_other_image.
 *
 * Returns
 *      Number of bytes written or -1 if any error happens.
 */
int ffmpeg_put_frame(struct ffmpeg *ffmpeg, AVFrame *pic)
{
    int out_size, ret;
#ifdef FFMPEG_AVWRITEFRAME_NEWAPI
    AVPacket pkt;

    av_init_packet(&pkt); /* Init static structure. */
    pkt.stream_index = ffmpeg->video_st->index;
#endif /* FFMPEG_AVWRITEFRAME_NEWAPI */

    if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case. The API will change slightly in the near future for that. */
#ifdef FFMPEG_AVWRITEFRAME_NEWAPI
#if LIBAVCODEC_VERSION_MAJOR < 53
        pkt.flags |= PKT_FLAG_KEY;
#else
        pkt.flags |= AV_PKT_FLAG_KEY;
#endif
        pkt.data = (uint8_t *)pic;
        pkt.size = sizeof(AVPicture);
        ret = av_write_frame(ffmpeg->oc, &pkt);
#else
        ret = av_write_frame(ffmpeg->oc, ffmpeg->video_st->index,
                             (uint8_t *)pic, sizeof(AVPicture));
#endif /* FFMPEG_AVWRITEFRAME_NEWAPI */
    } else {
        /* Encodes the image. */
        out_size = avcodec_encode_video(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
                                        ffmpeg->video_outbuf,
                                        ffmpeg->video_outbuf_size, pic);

        /* If zero size, it means the image was buffered. */
        if (out_size != 0) {
            /*
             * Writes the compressed frame in the media file.
             * XXX: in case of B frames, the pts is not yet valid.
             */
#ifdef FFMPEG_AVWRITEFRAME_NEWAPI
            pkt.pts = AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->pts;

            if (AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->key_frame)
#if LIBAVCODEC_VERSION_MAJOR < 53
                pkt.flags |= PKT_FLAG_KEY;
#else
                pkt.flags |= AV_PKT_FLAG_KEY;
#endif

            pkt.data = ffmpeg->video_outbuf;
            pkt.size = out_size;
            ret = av_write_frame(ffmpeg->oc, &pkt);
#else
            ret = av_write_frame(ffmpeg->oc, ffmpeg->video_st->index,
                                 ffmpeg->video_outbuf, out_size);
#endif /* FFMPEG_AVWRITEFRAME_NEWAPI */
        } else {
            ret = 0;
        }
    }

    if (ret != 0) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error while writing"
                   " video frame");
        ffmpeg_cleanups(ffmpeg);
        return -1;
    }

    return ret;
}
/**
 * get_oformat
 *      Obtains the output format used for the specified codec. For mpeg4 codecs,
 *      the format is avi; for mpeg1 codec, the format is mpeg. The filename has
 *      to be passed, because it gets the appropriate extension appended onto it.
 *
 * Returns
 *      AVOutputFormat pointer or NULL if any error happens.
 */
static AVOutputFormat *get_oformat(const char *codec, char *filename)
{
    const char *ext;
    AVOutputFormat *of = NULL;

    /*
     * Here, we use guess_format to automatically setup the codec information.
     * If we are using msmpeg4, manually set that codec here.
     * We also dynamically add the file extension to the filename here. This was
     * done to support both mpeg1 and mpeg4 codecs since they have different extensions.
     */
    if ((strcmp(codec, TIMELAPSE_CODEC) == 0)
#ifndef FFMPEG_NO_NONSTD_MPEG1
        || (strcmp(codec, "mpeg1") == 0)
#endif
        ) {
        ext = ".mpg";
        /*
         * We use "mpeg1video" for raw mpeg1 format. Using "mpeg" would
         * result in a muxed output file, which isn't appropriate here.
         */
#ifdef GUESS_NO_DEPRECATED
        of = guess_format("mpeg1video", NULL, NULL);
#else
        of = av_guess_format("mpeg1video", NULL, NULL);
#endif
        /* But we want the trailer to be correctly written. */
        if (of)
            of->write_trailer = mpeg1_write_trailer;

#ifdef FFMPEG_NO_NONSTD_MPEG1
    } else if (strcmp(codec, "mpeg1") == 0) {
        MOTION_LOG(WRN, TYPE_ENCODER, NO_ERRNO, "%s: *** mpeg1 support for normal"
                   " videos has been disabled ***");
        return NULL;
#endif
    } else if (strcmp(codec, "mpeg4") == 0) {
        ext = ".avi";
#ifdef GUESS_NO_DEPRECATED
        of = guess_format("avi", NULL, NULL);
#else
        of = av_guess_format("avi", NULL, NULL);
#endif
    } else if (strcmp(codec, "msmpeg4") == 0) {
        ext = ".avi";
#ifdef GUESS_NO_DEPRECATED
        of = guess_format("avi", NULL, NULL);
#else
        of = av_guess_format("avi", NULL, NULL);
#endif
        /* Manually override the codec id. */
        if (of)
            of->video_codec = CODEC_ID_MSMPEG4V2;

    } else if (strcmp(codec, "swf") == 0) {
        ext = ".swf";
#ifdef GUESS_NO_DEPRECATED
        of = guess_format("swf", NULL, NULL);
#else
        of = av_guess_format("swf", NULL, NULL);
#endif
    } else if (strcmp(codec, "flv") == 0) {
        ext = ".flv";
#ifdef GUESS_NO_DEPRECATED
        of = guess_format("flv", NULL, NULL);
#else
        of = av_guess_format("flv", NULL, NULL);
#endif
        if (of)
            of->video_codec = CODEC_ID_FLV1;

    } else if (strcmp(codec, "ffv1") == 0) {
        ext = ".avi";
#ifdef GUESS_NO_DEPRECATED
        of = guess_format("avi", NULL, NULL);
#else
        of = av_guess_format("avi", NULL, NULL);
#endif
        /*
         * Use the FFMPEG Lossless Video codec (experimental!).
         * Requires strict_std_compliance to be <= -2
         */
        if (of)
            of->video_codec = CODEC_ID_FFV1;

    } else if (strcmp(codec, "mov") == 0) {
        ext = ".mov";
#ifdef GUESS_NO_DEPRECATED
        of = guess_format("mov", NULL, NULL);
#else
        of = av_guess_format("mov", NULL, NULL);
#endif
    } else if (strcmp(codec, "ogg") == 0) {
        ext = ".ogg";
#ifdef GUESS_NO_DEPRECATED
        of = guess_format("ogg", NULL, NULL);
#else
        of = av_guess_format("ogg", NULL, NULL);
#endif
    } else {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: ffmpeg_video_codec option value"
                   " %s is not supported", codec);
        return NULL;
    }

    if (!of) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Could not guess format for %s",
                   codec);
        return NULL;
    }

    /* The 4 allows for ".avi" or ".mpg" to be appended. */
    strncat(filename, ext, 4);

    return of;
}
int bktr_start(struct context *cnt)
{
#ifdef HAVE_BKTR
    struct config *conf = &cnt->conf;
    struct video_dev *dev;
    int fd_tuner = -1;
    int width, height, capture_method;
    unsigned input, norm;
    unsigned long frequency;
    int fd_device = -1;

    MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: [%s]", conf->video_device);

    /*
     * We use width and height from conf in this function. They will be assigned
     * to width and height in imgs here, and cap_width and cap_height in
     * rotate_data won't be set until in rotate_init.
     * Motion requires that width and height are multiples of 8 so we check for this.
     */
    if (conf->width % 8) {
        MOTION_LOG(CRT, TYPE_VIDEO, NO_ERRNO,
                   "%s: config image width (%d) is not modulo 8", conf->width);
        return -2;
    }

    if (conf->height % 8) {
        MOTION_LOG(CRT, TYPE_VIDEO, NO_ERRNO,
                   "%s: config image height (%d) is not modulo 8", conf->height);
        return -2;
    }

    width = conf->width;
    height = conf->height;
    input = conf->input;
    norm = conf->norm;
    frequency = conf->frequency;
    capture_method = METEOR_CAP_CONTINOUS;

    pthread_mutex_lock(&bktr_mutex);

    /*
     * Transfer width and height from conf to imgs. The imgs values are the ones
     * that are used internally in Motion. That way, setting width and height via
     * http remote control won't screw things up.
     */
    cnt->imgs.width = width;
    cnt->imgs.height = height;

    /*
     * First we walk through the already discovered video devices to see
     * if we have already setup the same device before. If this is the case
     * the device is a Round Robin device and we set the basic settings
     * and return the file descriptor.
     */
    dev = viddevs;
    while (dev) {
        if (!strcmp(conf->video_device, dev->video_device)) {
            int dummy = METEOR_CAP_STOP_CONT;
            dev->usage_count++;
            cnt->imgs.type = dev->v4l_fmt;

            if (ioctl(dev->fd_device, METEORCAPTUR, &dummy) < 0) {
                MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s Stopping capture");
                return -1;
            }

            MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s Reusing [%s] inputs [%d,%d] Change "
                       "capture method METEOR_CAP_SINGLE",
                       dev->video_device, dev->input, conf->input);

            dev->capture_method = METEOR_CAP_SINGLE;

            switch (cnt->imgs.type) {
            case VIDEO_PALETTE_GREY:
                cnt->imgs.motionsize = width * height;
                cnt->imgs.size = width * height;
                break;
            case VIDEO_PALETTE_RGB24:
            case VIDEO_PALETTE_YUV422:
                cnt->imgs.type = VIDEO_PALETTE_YUV420P;
                /* FALLTHROUGH */
            case VIDEO_PALETTE_YUV420P:
                MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s VIDEO_PALETTE_YUV420P setting"
                           " imgs.size and imgs.motionsize");
                cnt->imgs.motionsize = width * height;
                cnt->imgs.size = (width * height * 3) / 2;
                break;
            }

            pthread_mutex_unlock(&bktr_mutex);

            return dev->fd_device; // FIXME return fd_tuner ?!
        }
        dev = dev->next;
    }

    dev = mymalloc(sizeof(struct video_dev));

    fd_device = open(conf->video_device, O_RDWR);

    if (fd_device < 0) {
        MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: open video device %s",
                   conf->video_device);
        free(dev);
        pthread_mutex_unlock(&bktr_mutex);
        return -1;
    }

    /*
     * Only open the tuner if conf->tuner_device is set, a frequency is
     * given and the input is the TV tuner.
     */
    if ((conf->tuner_device != NULL) && (frequency > 0) && (input == BKTR_IN_TV)) {
        fd_tuner = open(conf->tuner_device, O_RDWR);
        if (fd_tuner < 0) {
            MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: open tuner device %s",
                       conf->tuner_device);
            free(dev);
            pthread_mutex_unlock(&bktr_mutex);
            return -1;
        }
    }

    pthread_mutexattr_init(&dev->attr);
    pthread_mutex_init(&dev->mutex, &dev->attr);

    dev->usage_count = 1;
    dev->video_device = conf->video_device;
    dev->tuner_device = conf->tuner_device;
    dev->fd_device = fd_device;
    dev->fd_tuner = fd_tuner;
    dev->input = input;
    dev->height = height;
    dev->width = width;
    dev->freq = frequency;
    dev->owner = -1;
    dev->capture_method = capture_method;

    /*
     * We set brightness, contrast, saturation and hue = 0 so that they only get
     * set if the config is not zero.
     */
    dev->brightness = 0;
    dev->contrast = 0;
    dev->saturation = 0;
    dev->hue = 0;

    /* Default palette */
    dev->v4l_fmt = VIDEO_PALETTE_YUV420P;
    dev->v4l_curbuffer = 0;
    dev->v4l_maxbuffer = 1;

    if (!bktr_device_init(dev, width, height, input, norm, frequency)) {
        close(dev->fd_device);
        pthread_mutexattr_destroy(&dev->attr);
        pthread_mutex_destroy(&dev->mutex);
        free(dev);
        pthread_mutex_unlock(&bktr_mutex);
        return -1;
    }

    cnt->imgs.type = dev->v4l_fmt;

    switch (cnt->imgs.type) {
    case VIDEO_PALETTE_GREY:
        cnt->imgs.size = width * height;
        cnt->imgs.motionsize = width * height;
        break;
    case VIDEO_PALETTE_RGB24:
    case VIDEO_PALETTE_YUV422:
        cnt->imgs.type = VIDEO_PALETTE_YUV420P;
        /* FALLTHROUGH */
    case VIDEO_PALETTE_YUV420P:
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: VIDEO_PALETTE_YUV420P imgs.type");
        cnt->imgs.size = (width * height * 3) / 2;
        cnt->imgs.motionsize = width * height;
        break;
    }

    /* Insert into linked list */
    dev->next = viddevs;
    viddevs = dev;

    pthread_mutex_unlock(&bktr_mutex);

    return fd_device;
#else
    if (!cnt)
        MOTION_LOG(DBG, TYPE_VIDEO, NO_ERRNO, "%s: BKTR is not enabled.");

    return -1;
#endif
}
/**
 * ffmpeg_put_frame
 *      Encodes and writes a video frame using the av_write_frame API. This is
 *      a helper function for ffmpeg_put_image and ffmpeg_put_other_image.
 *
 * Returns
 *      Number of bytes written or -1 if any error happens.
 */
int ffmpeg_put_frame(struct ffmpeg *ffmpeg, AVFrame *pic)
{
    /*
     * Since the logic, return values and conditions changed so
     * dramatically between versions, the encoding of the frame
     * is completely separated by Libav/FFmpeg version.
     */
#if (LIBAVFORMAT_VERSION_MAJOR >= 55) || ((LIBAVFORMAT_VERSION_MAJOR == 54) && (LIBAVFORMAT_VERSION_MINOR > 6))

    int retcd;
    int got_packet_ptr;
    AVPacket pkt;
    char errstr[128];

    av_init_packet(&pkt); /* Init static structure. */

    if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
        pkt.stream_index = ffmpeg->video_st->index;
        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.data = (uint8_t *)pic;
        pkt.size = sizeof(AVPicture);
    } else {
        pkt.data = NULL;
        pkt.size = 0;
        retcd = avcodec_encode_video2(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
                                      &pkt, pic, &got_packet_ptr);
        if (retcd < 0) {
            av_strerror(retcd, errstr, sizeof(errstr));
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                       "%s: Error encoding video:%s", errstr);
            /* Packet is freed upon failure of encoding. */
            return -1;
        }
        if (got_packet_ptr == 0) {
            /* Buffered packet. Throw special return code. */
            av_free_packet(&pkt);
            return -2;
        }

        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts = av_rescale_q(pkt.pts,
                                   ffmpeg->video_st->codec->time_base,
                                   ffmpeg->video_st->time_base);
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts = av_rescale_q(pkt.dts,
                                   ffmpeg->video_st->codec->time_base,
                                   ffmpeg->video_st->time_base);
    }

    if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
        retcd = timelapse_append(ffmpeg, pkt);
    } else {
        retcd = av_write_frame(ffmpeg->oc, &pkt);
    }
    av_free_packet(&pkt);

    if (retcd != 0) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                   "%s: Error while writing video frame");
        ffmpeg_cleanups(ffmpeg);
        return -1;
    }

    return retcd;

#else  // Old versions of Libav/FFmpeg

    int retcd;
    AVPacket pkt;

    av_init_packet(&pkt); /* Init static structure. */
    pkt.stream_index = ffmpeg->video_st->index;

    if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
        // Raw video case.
        pkt.size = sizeof(AVPicture);
        pkt.data = (uint8_t *)pic;
        pkt.flags |= AV_PKT_FLAG_KEY;
    } else {
        retcd = avcodec_encode_video(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
                                     ffmpeg->video_outbuf,
                                     ffmpeg->video_outbuf_size, pic);
        if (retcd < 0) {
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO, "%s: Error encoding video");
            av_free_packet(&pkt);
            return -1;
        }
        if (retcd == 0) {
            // No bytes encoded => buffered => special handling
            av_free_packet(&pkt);
            return -2;
        }

        pkt.size = retcd;
        pkt.data = ffmpeg->video_outbuf;
        pkt.pts = AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->pts;
        if (AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
    }

    if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
        retcd = timelapse_append(ffmpeg, pkt);
    } else {
        retcd = av_write_frame(ffmpeg->oc, &pkt);
    }

    if (retcd != 0) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                   "%s: Error while writing video frame");
        ffmpeg_cleanups(ffmpeg);
        return -1;
    }

    return retcd;

#endif
}
/**
 * rotate_init
 *
 *  Initializes rotation data - allocates memory and determines which function
 *  to use for 180 degrees rotation.
 *
 * Parameters:
 *
 *   cnt - the current thread's context structure
 *
 * Returns: nothing
 */
void rotate_init(struct context *cnt)
{
    int size;

    /* Make sure temp_buf isn't freed if it hasn't been allocated. */
    cnt->rotate_data.temp_buf = NULL;

    /*
     * Assign the value in conf.rotate_deg to rotate_data.degrees. This way,
     * we have a value that is safe from changes caused by motion-control.
     */
    if ((cnt->conf.rotate_deg % 90) > 0) {
        MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO,
                   "%s: Config option \"rotate\" not a multiple of 90: %d",
                   cnt->conf.rotate_deg);
        cnt->conf.rotate_deg = 0;     /* Disable rotation. */
        cnt->rotate_data.degrees = 0; /* Force return below. */
    } else {
        cnt->rotate_data.degrees = cnt->conf.rotate_deg % 360; /* Range: 0..359 */
    }

    /*
     * Upon entrance to this function, imgs.width and imgs.height contain the
     * capture dimensions (as set in the configuration file, or read from a
     * netcam source).
     *
     * If rotating 90 or 270 degrees, the capture dimensions and output dimensions
     * are not the same. Capture dimensions will be contained in cap_width and
     * cap_height in cnt->rotate_data, while output dimensions will be contained
     * in imgs.width and imgs.height.
     */

    /* 1. Transfer capture dimensions into cap_width and cap_height. */
    cnt->rotate_data.cap_width = cnt->imgs.width;
    cnt->rotate_data.cap_height = cnt->imgs.height;

    if ((cnt->rotate_data.degrees == 90) || (cnt->rotate_data.degrees == 270)) {
        /* 2. "Swap" imgs.width and imgs.height. */
        cnt->imgs.width = cnt->rotate_data.cap_height;
        cnt->imgs.height = cnt->rotate_data.cap_width;
    }

    /*
     * If we're not rotating, let's exit once we have setup the capture dimensions
     * and output dimensions properly.
     */
    if (cnt->rotate_data.degrees == 0)
        return;

    switch (cnt->imgs.type) {
    case VIDEO_PALETTE_YUV420P:
        /*
         * For YUV 4:2:0 planar, the memory block used for 90/270 degrees
         * rotation needs to be width x height x 1.5 bytes large.
         */
        size = cnt->imgs.width * cnt->imgs.height * 3 / 2;
        break;
    case VIDEO_PALETTE_GREY:
        /*
         * For greyscale, the memory block used for 90/270 degrees rotation
         * needs to be width x height bytes large.
         */
        size = cnt->imgs.width * cnt->imgs.height;
        break;
    default:
        cnt->rotate_data.degrees = 0;
        MOTION_LOG(WRN, TYPE_ALL, NO_ERRNO,
                   "%s: Unsupported palette (%d), rotation is disabled",
                   cnt->imgs.type);
        return;
    }

    /*
     * Allocate memory if rotating 90 or 270 degrees, because those rotations
     * cannot be performed in-place (they can, but it would be too slow).
     */
    if ((cnt->rotate_data.degrees == 90) || (cnt->rotate_data.degrees == 270))
        cnt->rotate_data.temp_buf = mymalloc(size);
}
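For reference, the reason temp_buf is needed: a 90 degree rotation changes the row stride, so each output row gathers pixels from a whole column of the source and the transform cannot reasonably be done in place. A minimal sketch (not Motion's actual rotation code) for one 8-bit plane:

/* Sketch: rotate a width x height 8-bit plane 90 degrees clockwise into dst,
 * which must be a separate (height x width) buffer such as temp_buf. */
static void rot90cw_plane(const unsigned char *src, unsigned char *dst,
                          int width, int height)
{
    int x, y;

    /* Source pixel (x, y) lands at row x, column (height - 1 - y);
     * the destination row stride is the new width, i.e. height. */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            dst[x * height + (height - 1 - y)] = src[y * width + x];
}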
/*
 * jpeg_data: Buffer with jpeg data to decode
 * len:       Length of buffer
 * itype:     0: Not interlaced
 *            1: Interlaced, Top field first
 *            2: Interlaced, Bottom field first
 * ctype:     Chroma format for decompression.
 *            Currently only Y4M_CHROMA_{420JPEG,422} are available.
 * returns:
 *     -1 on fatal error
 *      0 on success
 *      1 if jpeg lib threw a "corrupt jpeg data" warning.
 *        In this case, "a damaged output image is likely."
 */
int decode_jpeg_raw(unsigned char *jpeg_data, int len, int itype, int ctype,
                    unsigned int width, unsigned int height,
                    unsigned char *raw0, unsigned char *raw1, unsigned char *raw2)
{
    int numfields, hsf[3], field, yl, yc;
    int i, xsl, xsc, xs, hdown;
    unsigned int x, y = 0, vsf[3], xd;

    JSAMPROW row0[16] = { buf0[0], buf0[1], buf0[2], buf0[3],
                          buf0[4], buf0[5], buf0[6], buf0[7],
                          buf0[8], buf0[9], buf0[10], buf0[11],
                          buf0[12], buf0[13], buf0[14], buf0[15]};
    JSAMPROW row1[8] = { buf1[0], buf1[1], buf1[2], buf1[3],
                         buf1[4], buf1[5], buf1[6], buf1[7]};
    JSAMPROW row2[16] = { buf2[0], buf2[1], buf2[2], buf2[3],
                          buf2[4], buf2[5], buf2[6], buf2[7]};
    JSAMPROW row1_444[16], row2_444[16];
    JSAMPARRAY scanarray[3] = { row0, row1, row2};

    struct jpeg_decompress_struct dinfo;
    struct my_error_mgr jerr;

    /* We set up the normal JPEG error routines, then override error_exit. */
    dinfo.err = jpeg_std_error(&jerr.pub);
    jerr.pub.error_exit = my_error_exit;

    /* Also hook the emit_message routine to note corrupt-data warnings. */
    jerr.original_emit_message = jerr.pub.emit_message;
    jerr.pub.emit_message = my_emit_message;
    jerr.warning_seen = 0;

    /* Establish the setjmp return context for my_error_exit to use. */
    if (setjmp(jerr.setjmp_buffer)) {
        /* If we get here, the JPEG code has signaled an error. */
        jpeg_destroy_decompress(&dinfo);
        return 1;
    }

    jpeg_create_decompress(&dinfo);
    jpeg_buffer_src(&dinfo, jpeg_data, len);

    /*
     * Read header, make some checks and try to figure out what the
     * user really wants.
     */
    jpeg_read_header(&dinfo, TRUE);
    dinfo.raw_data_out = TRUE;
#if JPEG_LIB_VERSION >= 70
    dinfo.do_fancy_upsampling = FALSE;
#endif
    dinfo.out_color_space = JCS_YCbCr;
    dinfo.dct_method = JDCT_IFAST;
    guarantee_huff_tables(&dinfo);
    jpeg_start_decompress(&dinfo);

    if (dinfo.output_components != 3) {
        MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Output components of JPEG image"
                   " = %d, must be 3", dinfo.output_components);
        goto ERR_EXIT;
    }

    for (i = 0; i < 3; i++) {
        hsf[i] = dinfo.comp_info[i].h_samp_factor;
        vsf[i] = dinfo.comp_info[i].v_samp_factor;
    }

    if ((hsf[0] != 2 && hsf[0] != 1) || hsf[1] != 1 || hsf[2] != 1 ||
        (vsf[0] != 1 && vsf[0] != 2) || vsf[1] != 1 || vsf[2] != 1) {
        MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Unsupported sampling factors,"
                   " hsf=(%d, %d, %d) vsf=(%d, %d, %d) !",
                   hsf[0], hsf[1], hsf[2], vsf[0], vsf[1], vsf[2]);
        goto ERR_EXIT;
    }

    if (hsf[0] == 1) {
        if (height % 8 != 0) {
            MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: YUV 4:4:4 sampling, but image"
                       " height %d not divisible by 8 !", height);
            goto ERR_EXIT;
        }

        for (y = 0; y < 16; y++) {
            // Allocate a special buffer for the extra sampling depth.
            row1_444[y] = (unsigned char *)malloc(dinfo.output_width * sizeof(char));
            row2_444[y] = (unsigned char *)malloc(dinfo.output_width * sizeof(char));
        }
        scanarray[1] = row1_444;
        scanarray[2] = row2_444;
    }

    /* The height must match the image height or be exactly twice the image height. */
    if (dinfo.output_height == height) {
        numfields = 1;
    } else if (2 * dinfo.output_height == height) {
        numfields = 2;
    } else {
        MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Read JPEG: requested height = %d, "
                   "height of image = %d", height, dinfo.output_height);
        goto ERR_EXIT;
    }

    /* Width is more flexible */
    if (dinfo.output_width > MAX_LUMA_WIDTH) {
        MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Image width of %d exceeds max",
                   dinfo.output_width);
        goto ERR_EXIT;
    }

    if (width < 2 * dinfo.output_width / 3) {
        /* Downsample 2:1 */
        hdown = 1;
        if (2 * width < dinfo.output_width)
            xsl = (dinfo.output_width - 2 * width) / 2;
        else
            xsl = 0;
    } else if (width == 2 * dinfo.output_width / 3) {
        /* Special case of 3:2 downsampling */
        hdown = 2;
        xsl = 0;
    } else {
        /* No downsampling */
        hdown = 0;
        if (width < dinfo.output_width)
            xsl = (dinfo.output_width - width) / 2;
        else
            xsl = 0;
    }

    /* Make xsl even, calculate xsc */
    xsl = xsl & ~1;
    xsc = xsl / 2;

    yl = yc = 0;

    for (field = 0; field < numfields; field++) {
        if (field > 0) {
            jpeg_read_header(&dinfo, TRUE);
            dinfo.raw_data_out = TRUE;
#if JPEG_LIB_VERSION >= 70
            dinfo.do_fancy_upsampling = FALSE;
#endif
            dinfo.out_color_space = JCS_YCbCr;
            dinfo.dct_method = JDCT_IFAST;
            jpeg_start_decompress(&dinfo);
        }

        if (numfields == 2) {
            switch (itype) {
            case Y4M_ILACE_TOP_FIRST:
                yl = yc = field;
                break;
            case Y4M_ILACE_BOTTOM_FIRST:
                yl = yc = (1 - field);
                break;
            default:
                MOTION_LOG(ERR, TYPE_ALL, NO_ERRNO, "%s: Input is interlaced but"
                           " no interlacing set");
                goto ERR_EXIT;
            }
        } else {
            yl = yc = 0;
        }

        while (dinfo.output_scanline < dinfo.output_height) {
            /* Read raw data */
            jpeg_read_raw_data(&dinfo, scanarray, 8 * vsf[0]);

            for (y = 0; y < 8 * vsf[0]; yl += numfields, y++) {
                xd = yl * width;
                xs = xsl;

                if (hdown == 0) {
                    for (x = 0; x < width; x++)
                        raw0[xd++] = row0[y][xs++];
                } else if (hdown == 1) {
                    for (x = 0; x < width; x++, xs += 2)
                        raw0[xd++] = (row0[y][xs] + row0[y][xs + 1]) >> 1;
                } else {
                    for (x = 0; x < width / 2; x++, xd += 2, xs += 3) {
                        raw0[xd] = (2 * row0[y][xs] + row0[y][xs + 1]) / 3;
                        raw0[xd + 1] = (2 * row0[y][xs + 2] + row0[y][xs + 1]) / 3;
                    }
                }
            }

            /* Horizontal downsampling of chroma */
            for (y = 0; y < 8; y++) {
                xs = xsc;

                if (hsf[0] == 1)
                    for (x = 0; x < width / 2; x++, xs++) {
                        row1[y][xs] = (row1_444[y][2 * x] + row1_444[y][2 * x + 1]) >> 1;
                        row2[y][xs] = (row2_444[y][2 * x] + row2_444[y][2 * x + 1]) >> 1;
                    }

                xs = xsc;

                if (hdown == 0) {
                    for (x = 0; x < width / 2; x++, xs++) {
                        chr1[y][x] = row1[y][xs];
                        chr2[y][x] = row2[y][xs];
                    }
                } else if (hdown == 1) {
                    for (x = 0; x < width / 2; x++, xs += 2) {
                        chr1[y][x] = (row1[y][xs] + row1[y][xs + 1]) >> 1;
                        chr2[y][x] = (row2[y][xs] + row2[y][xs + 1]) >> 1;
                    }
                } else {
                    for (x = 0; x < width / 2; x += 2, xs += 3) {
                        chr1[y][x] = (2 * row1[y][xs] + row1[y][xs + 1]) / 3;
                        chr1[y][x + 1] = (2 * row1[y][xs + 2] + row1[y][xs + 1]) / 3;
                        chr2[y][x] = (2 * row2[y][xs] + row2[y][xs + 1]) / 3;
                        chr2[y][x + 1] = (2 * row2[y][xs + 2] + row2[y][xs + 1]) / 3;
                    }
                }
            }

            /* Vertical resampling of chroma */
            switch (ctype) {
            case Y4M_CHROMA_422:
                if (vsf[0] == 1) {
                    /* Just copy */
                    for (y = 0; y < 8 /*&& yc < height */; y++, yc += numfields) {
                        xd = yc * width / 2;

                        for (x = 0; x < width / 2; x++, xd++) {
                            raw1[xd] = chr1[y][x];
                            raw2[xd] = chr2[y][x];
                        }
                    }
                } else {
                    /* Upsample */
                    for (y = 0; y < 8 /*&& yc < height */; y++) {
                        xd = yc * width / 2;

                        for (x = 0; x < width / 2; x++, xd++) {
                            raw1[xd] = chr1[y][x];
                            raw2[xd] = chr2[y][x];
                        }

                        yc += numfields;
                        xd = yc * width / 2;

                        for (x = 0; x < width / 2; x++, xd++) {
                            raw1[xd] = chr1[y][x];
                            raw2[xd] = chr2[y][x];
                        }

                        yc += numfields;
                    }
                }
                break;
            default:
                /*
                 * Should be case Y4M_CHROMA_420JPEG: but use default: for compatibility.
                 * Some pass things like '420' in with the expectation that anything
                 * other than Y4M_CHROMA_422 will default to 420JPEG.
                 */
                if (vsf[0] == 1) {
                    /* Really downsample */
                    for (y = 0; y < 8 /*&& yc < height/2*/; y += 2, yc += numfields) {
                        xd = yc * width / 2;

                        for (x = 0; x < width / 2; x++, xd++) {
                            assert(xd < (width * height / 4));
                            raw1[xd] = (chr1[y][x] + chr1[y + 1][x]) >> 1;
                            raw2[xd] = (chr2[y][x] + chr2[y + 1][x]) >> 1;
                        }
                    }
                } else {
                    /* Just copy */
                    for (y = 0; y < 8 /* && yc < height / 2 */; y++, yc += numfields) {
                        xd = yc * width / 2;

                        for (x = 0; x < width / 2; x++, xd++) {
                            raw1[xd] = chr1[y][x];
                            raw2[xd] = chr2[y][x];
                        }
                    }
                }
                break;
            }
/**
 * v4l2_select_input
 */
static int v4l2_select_input(struct config *conf, struct video_dev *viddev,
                             src_v4l2_t *vid_source, int in, int norm,
                             unsigned long freq_, int tuner_number ATTRIBUTE_UNUSED)
{
    struct v4l2_input input;
    struct v4l2_standard standard;
    v4l2_std_id std_id;

    /* Set the input. */
    memset(&input, 0, sizeof(input));

    if (in == IN_DEFAULT)
        input.index = IN_TV;
    else
        input.index = in;

    if (xioctl(vid_source->fd, VIDIOC_ENUMINPUT, &input) == -1) {
        MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to query input %d"
                   " (VIDIOC_ENUMINPUT); if you use a webcam, change the input"
                   " value in the config to -1", input.index);
        return -1;
    }

    MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: name = \"%s\", type 0x%08X,"
               " status %08x", input.name, input.type, input.status);

    if (input.type & V4L2_INPUT_TYPE_TUNER)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - TUNER");

    if (input.type & V4L2_INPUT_TYPE_CAMERA)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - CAMERA");

    if (xioctl(vid_source->fd, VIDIOC_S_INPUT, &input.index) == -1) {
        MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Error selecting input %d"
                   " VIDIOC_S_INPUT", input.index);
        return -1;
    }

    viddev->input = conf->input = in;

    /*
     * Set the video standard. Usually webcams don't support this ioctl
     * or return V4L2_STD_UNKNOWN.
     */
    if (xioctl(vid_source->fd, VIDIOC_G_STD, &std_id) == -1) {
        MOTION_LOG(WRN, TYPE_VIDEO, NO_ERRNO, "%s: Device doesn't support VIDIOC_G_STD");
        norm = std_id = 0; // V4L2_STD_UNKNOWN = 0
    }

    if (std_id) {
        memset(&standard, 0, sizeof(standard));
        standard.index = 0;

        while (xioctl(vid_source->fd, VIDIOC_ENUMSTD, &standard) == 0) {
            if (standard.id & std_id)
                MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - video standard %s",
                           standard.name);
            standard.index++;
        }

        switch (norm) {
        case 1:
            std_id = V4L2_STD_NTSC;
            break;
        case 2:
            std_id = V4L2_STD_SECAM;
            break;
        default:
            std_id = V4L2_STD_PAL;
        }

        if (xioctl(vid_source->fd, VIDIOC_S_STD, &std_id) == -1)
            MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: Error selecting standard"
                       " method %d VIDIOC_S_STD", (int)std_id);

        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set standard method %d",
                   (int)std_id);
    }

    viddev->norm = conf->norm = norm;

    /* If this input is attached to a tuner, set the frequency. */
    if (input.type & V4L2_INPUT_TYPE_TUNER) {
        struct v4l2_tuner tuner;
        struct v4l2_frequency freq;

        /* Query the tuner's capabilities. */
        memset(&tuner, 0, sizeof(struct v4l2_tuner));
        tuner.index = input.tuner;

        if (xioctl(vid_source->fd, VIDIOC_G_TUNER, &tuner) == -1) {
            MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: tuner %d VIDIOC_G_TUNER",
                       tuner.index);
            return 0;
        }

        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set tuner %d", tuner.index);

        /* Set the frequency. */
        memset(&freq, 0, sizeof(struct v4l2_frequency));
        freq.tuner = input.tuner;
        freq.type = V4L2_TUNER_ANALOG_TV;
        freq.frequency = (freq_ / 1000) * 16;

        if (xioctl(vid_source->fd, VIDIOC_S_FREQUENCY, &freq) == -1) {
            MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: freq %u VIDIOC_S_FREQUENCY",
                       freq.frequency);
            return 0;
        }

        viddev->freq = conf->frequency = freq_;

        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Set Frequency to %u",
                   freq.frequency);
    } else {
        viddev->freq = conf->frequency = 0;
    }

    return 0;
}
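The xioctl() wrapper used throughout the V4L2 code above is not shown in this excerpt. A typical definition (a sketch following common V4L2 practice, not necessarily this file's exact code) simply retries the ioctl while it is interrupted by a signal:

/* Sketch: ioctl wrapper that retries on EINTR, as is conventional for V4L2. */
static int xioctl(int fd, int request, void *arg)
{
    int ret;

    do
        ret = ioctl(fd, request, arg);
    while (ret == -1 && errno == EINTR);

    return ret;
}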
/**
 * v4l_open_vidpipe
 */
static int v4l_open_vidpipe(void)
{
    int pipe_fd = -1;
    char pipepath[255];
    char buffer[255];
    char *major;
    char *minor;
    struct utsname uts;

    if (uname(&uts) < 0) {
        MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to execute uname");
        return -1;
    }

    major = strtok(uts.release, ".");
    minor = strtok(NULL, ".");

    if ((major == NULL) || (minor == NULL) || (strcmp(major, "2"))) {
        MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to decipher OS version");
        return -1;
    }

    if (strcmp(minor, "5") < 0) {
        FILE *vloopbacks;
        char *loop;
        char *input;
        char *istatus;
        char *output;
        char *ostatus;

        vloopbacks = fopen("/proc/video/vloopback/vloopbacks", "r");

        if (!vloopbacks) {
            MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed to open "
                       "'/proc/video/vloopback/vloopbacks'");
            return -1;
        }

        /* Read vloopback version. */
        if (!fgets(buffer, sizeof(buffer), vloopbacks)) {
            MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to read vloopback version");
            myfclose(vloopbacks);
            return -1;
        }

        fprintf(stderr, "\t%s", buffer);

        /* Read explanation line. */
        if (!fgets(buffer, sizeof(buffer), vloopbacks)) {
            MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Unable to read vloopback"
                       " explanation line");
            myfclose(vloopbacks);
            return -1;
        }

        while (fgets(buffer, sizeof(buffer), vloopbacks)) {
            if (strlen(buffer) > 1) {
                buffer[strlen(buffer) - 1] = 0;
                loop = strtok(buffer, "\t");
                input = strtok(NULL, "\t");
                istatus = strtok(NULL, "\t");
                output = strtok(NULL, "\t");
                ostatus = strtok(NULL, "\t");

                if (istatus[0] == '-') {
                    snprintf(pipepath, sizeof(pipepath), "/dev/%s", input);
                    pipe_fd = open(pipepath, O_RDWR);

                    if (pipe_fd >= 0) {
                        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: \tInput: /dev/%s "
                                   "\tOutput: /dev/%s", input, output);
                        break;
                    }
                }
            }
        }

        myfclose(vloopbacks);
    } else {
        DIR *dir;
        struct dirent *dirp;
        const char prefix[] = "/sys/class/video4linux/";
        char *ptr, *io;
        int fd;
        int low = 9999;
        int tfd;
        int tnum;

        if ((dir = opendir(prefix)) == NULL) {
            MOTION_LOG(CRT, TYPE_VIDEO, SHOW_ERRNO, "%s: Failed to open '%s'",
                       prefix);
            return -1;
        }

        while ((dirp = readdir(dir)) != NULL) {
            if (!strncmp(dirp->d_name, "video", 5)) {
                strncpy(buffer, prefix, sizeof(buffer));
                strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer));
                strncat(buffer, "/name", sizeof(buffer) - strlen(buffer));

                if ((fd = open(buffer, O_RDONLY)) >= 0) {
                    if ((read(fd, buffer, sizeof(buffer) - 1)) < 0) {
                        close(fd);
                        continue;
                    }

                    ptr = strtok(buffer, " ");

                    if (strcmp(ptr, "Video")) {
                        close(fd);
                        continue;
                    }

                    major = strtok(NULL, " ");
                    minor = strtok(NULL, " ");
                    io = strtok(NULL, " \n");

                    if (strcmp(major, "loopback") || strcmp(io, "input")) {
                        close(fd);
                        continue;
                    }

                    if ((ptr = strtok(buffer, " ")) == NULL) {
                        close(fd);
                        continue;
                    }

                    tnum = atoi(minor);

                    if (tnum < low) {
                        mystrcpy(buffer, "/dev/");
                        strncat(buffer, dirp->d_name, sizeof(buffer) - strlen(buffer));

                        if ((tfd = open(buffer, O_RDWR)) >= 0) {
                            strncpy(pipepath, buffer, sizeof(pipepath));

                            if (pipe_fd >= 0)
                                close(pipe_fd);

                            pipe_fd = tfd;
                            low = tnum;
                        }
                    }
                    close(fd);
                }
            }
        }

        closedir(dir);

        if (pipe_fd >= 0)
            MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Opened %s as input", pipepath);
    }

    return pipe_fd;
}
/**
 * v4l2_get_capability
 */
static int v4l2_get_capability(src_v4l2_t *vid_source)
{
    if (xioctl(vid_source->fd, VIDIOC_QUERYCAP, &vid_source->cap) < 0) {
        MOTION_LOG(ERR, TYPE_VIDEO, NO_ERRNO, "%s: Not a V4L2 device?");
        return -1;
    }

    MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: \n------------------------\n"
               "cap.driver: \"%s\"\n"
               "cap.card: \"%s\"\n"
               "cap.bus_info: \"%s\"\n"
               "cap.capabilities=0x%08X\n------------------------",
               vid_source->cap.driver, vid_source->cap.card,
               vid_source->cap.bus_info, vid_source->cap.capabilities);

    if (vid_source->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - VIDEO_CAPTURE");

    if (vid_source->cap.capabilities & V4L2_CAP_VIDEO_OUTPUT)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - VIDEO_OUTPUT");

    if (vid_source->cap.capabilities & V4L2_CAP_VIDEO_OVERLAY)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - VIDEO_OVERLAY");

    if (vid_source->cap.capabilities & V4L2_CAP_VBI_CAPTURE)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - VBI_CAPTURE");

    if (vid_source->cap.capabilities & V4L2_CAP_VBI_OUTPUT)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - VBI_OUTPUT");

    if (vid_source->cap.capabilities & V4L2_CAP_RDS_CAPTURE)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - RDS_CAPTURE");

    if (vid_source->cap.capabilities & V4L2_CAP_TUNER)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - TUNER");

    if (vid_source->cap.capabilities & V4L2_CAP_AUDIO)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - AUDIO");

    if (vid_source->cap.capabilities & V4L2_CAP_READWRITE)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - READWRITE");

    if (vid_source->cap.capabilities & V4L2_CAP_ASYNCIO)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - ASYNCIO");

    if (vid_source->cap.capabilities & V4L2_CAP_STREAMING)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - STREAMING");

    if (vid_source->cap.capabilities & V4L2_CAP_TIMEPERFRAME)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: - TIMEPERFRAME");

    if (!(vid_source->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
        MOTION_LOG(ERR, TYPE_VIDEO, NO_ERRNO, "%s: Device does not support capturing.");
        return -1;
    }

    return 0;
}
/**
 * vid_close
 *
 * vid_close is called from motion.c when a Motion thread is stopped or restarted.
 */
void vid_close(struct context *cnt)
{
#ifndef WITHOUT_V4L
    struct video_dev *dev = viddevs;
    struct video_dev *prev = NULL;
#endif

    /* Cleanup the netcam part */
    if (cnt->netcam) {
        netcam_cleanup(cnt->netcam, 0);
        cnt->netcam = NULL;
        return;
    }

#ifndef WITHOUT_V4L
    /* Cleanup the v4l part */
    pthread_mutex_lock(&vid_mutex);

    while (dev) {
        if (dev->fd_bktr == cnt->video_dev)
            break;
        prev = dev;
        dev = dev->next;
    }

    pthread_mutex_unlock(&vid_mutex);

    /* Set it as closed in thread context. */
    cnt->video_dev = -1;

    if (dev == NULL) {
        MOTION_LOG(CRT, TYPE_VIDEO, NO_ERRNO, "%s: Unable to find video device");
        return;
    }

    if (--dev->usage_count == 0) {
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Closing video device %s",
                   dev->video_device);

        if (dev->fd_tuner > 0)
            close(dev->fd_tuner);

        if (dev->fd_bktr > 0) {
            if (dev->capture_method == METEOR_CAP_CONTINOUS) {
                /* fd_tuner is reused here as scratch space for the ioctl argument. */
                dev->fd_tuner = METEOR_CAP_STOP_CONT;
                ioctl(dev->fd_bktr, METEORCAPTUR, &dev->fd_tuner);
            }
            close(dev->fd_bktr);
            dev->fd_tuner = -1;
        }

        /* Unmap the capture buffer of this device (not the list head's). */
        munmap(dev->v4l_buffers[0], dev->v4l_bufsize);
        dev->v4l_buffers[0] = MAP_FAILED;

        dev->fd_bktr = -1;
        pthread_mutex_lock(&vid_mutex);

        /* Remove from list */
        if (prev == NULL)
            viddevs = dev->next;
        else
            prev->next = dev->next;

        pthread_mutex_unlock(&vid_mutex);

        pthread_mutexattr_destroy(&dev->attr);
        pthread_mutex_destroy(&dev->mutex);
        free(dev);
    } else {
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Still %d users of video device %s, "
                   "so we don't close it now", dev->usage_count, dev->video_device);
        /*
         * There is still at least one thread using this device.
         * If we own it, release it.
         */
        if (dev->owner == cnt->threadnr) {
            dev->frames = 0;
            dev->owner = -1;
            pthread_mutex_unlock(&dev->mutex);
        }
    }
#endif /* !WITHOUT_V4L */
}
/*******************************************************************************************
    Video capture routines

    - set input
    - setup_pixelformat
    - set_geometry
    - set_brightness
    - set_chroma
    - set_contrast
    - set_channelset
    - set_channel
    - set_capture_mode
*******************************************************************************************/
static unsigned char *v4l_start(struct video_dev *viddev, int width, int height,
                                unsigned input, unsigned norm, unsigned long freq)
{
    int dev_bktr = viddev->fd_bktr;
    struct sigaction act, old;
    //int dev_tuner = viddev->fd_tuner;
    /*
     * To ensure that every device supports the capture mode.
     * TODO: autodetect the best capture mode.
     */
    int dummy = 1;
    // int pixelformat = BSD_VIDFMT_I420;
    void *map;

    /* If the tuner is the chosen input, the frequency needs to be set. */
    if ((viddev->tuner_device != NULL) && (input == IN_TV)) {
        if (!freq) {
            MOTION_LOG(WRN, TYPE_VIDEO, NO_ERRNO, "%s: Invalid frequency [%lu] for "
                       "source input [%i]", freq, input);
            return NULL;
        } else if (set_freq(viddev, freq) == -1) {
            MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: Frequency [%lu] Source input [%i]",
                       freq, input);
            return NULL;
        }
    }

    /* FIXME: if the tuner is set as input, we need to set options for the tuner, not for bktr. */
    if ((dummy = set_input(viddev, input)) == -1) {
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: set input [%d]", input);
        return NULL;
    }

    viddev->input = dummy;

    if ((dummy = set_input_format(viddev, norm)) == -1) {
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: set input format [%d]", norm);
        return NULL;
    }

    viddev->norm = dummy;

    if (set_geometry(viddev, width, height) == -1) {
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: set geometry [%d]x[%d]",
                   width, height);
        return NULL;
    }

    /*
    if (ioctl(dev_bktr, METEORSACTPIXFMT, &pixelformat) < 0) {
        MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: set encoding method BSD_VIDFMT_I420");
        return NULL;
    }

    NEEDED!? FIXME
    if (setup_pixelformat(viddev) == -1)
        return NULL;
    */

    if (freq) {
        MOTION_LOG(WRN, TYPE_VIDEO, NO_ERRNO, "%s: Frequency set (not implemented yet)");
        /*
         * TODO: missing implementation
         * set_channelset(viddev);
         * set_channel(viddev);
         * if (set_freq(viddev, freq) == -1)
         *     return NULL;
         */
    }

    /*
     * Set capture mode and capture buffers.
     * That is the buffer size for captured images,
     * so it depends on the color space of the input format. FIXME
     */
    viddev->v4l_bufsize = (((width * height * 3 / 2)) * sizeof(unsigned char));
    viddev->v4l_fmt = VIDEO_PALETTE_YUV420P;

    map = mmap((caddr_t)0, viddev->v4l_bufsize, PROT_READ|PROT_WRITE, MAP_SHARED,
               dev_bktr, (off_t)0);

    if (map == MAP_FAILED) {
        MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: mmap failed");
        return NULL;
    }

    /* FIXME double buffer */
    if (0) {
        viddev->v4l_maxbuffer = 2;
        viddev->v4l_buffers[0] = map;
        viddev->v4l_buffers[1] = (unsigned char *)map + 0; /* 0 is not valid, just a test */
        //viddev->v4l_buffers[1] = map + vid_buf.offsets[1];
    } else {
        viddev->v4l_buffers[0] = map;
        viddev->v4l_maxbuffer = 1;
    }

    viddev->v4l_curbuffer = 0;

    /* Clear the buffer */
    if (ioctl(dev_bktr, BT848SCBUF, &dummy) < 0) {
        MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: BT848SCBUF");
        return NULL;
    }

    /* Signal handler to know when data is ready to be read() */
    memset(&act, 0, sizeof(act));
    sigemptyset(&act.sa_mask);
    act.sa_handler = catchsignal;
    sigaction(SIGUSR2, &act, &old);

    dummy = SIGUSR2;

    //viddev->capture_method = METEOR_CAP_CONTINOUS;
    //viddev->capture_method = METEOR_CAP_SINGLE;

    if ((viddev->capture_method == METEOR_CAP_CONTINOUS) &&
        (ioctl(dev_bktr, METEORSSIGNAL, &dummy) < 0)) {
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: METEORSSIGNAL");

        viddev->capture_method = METEOR_CAP_SINGLE;

        if (ioctl(dev_bktr, METEORCAPTUR, &viddev->capture_method) < 0) {
            MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: METEORCAPTUR using single method "
                       "Error capturing");
            MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: METEORCAPTUR using single method "
                       "Error capturing");
        }
    } else {
        if (ioctl(dev_bktr, METEORCAPTUR, &viddev->capture_method) < 0) {
            viddev->capture_method = METEOR_CAP_SINGLE;

            if (ioctl(dev_bktr, METEORCAPTUR, &viddev->capture_method) < 0) {
                MOTION_LOG(ERR, TYPE_VIDEO, SHOW_ERRNO, "%s: METEORCAPTUR using single method "
                           "Error capturing");
            }
        }
    }

    if (viddev->capture_method == METEOR_CAP_CONTINOUS)
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: METEORCAPTUR METEOR_CAP_CONTINOUS");
    else
        MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: METEORCAPTUR METEOR_CAP_SINGLE");

    // Settle; replaces sleep(1).
    SLEEP(1, 0);

    /* FIXME */
    switch (viddev->v4l_fmt) {
    case VIDEO_PALETTE_YUV420P:
        viddev->v4l_bufsize = (width * height * 3) / 2;
        break;
    case VIDEO_PALETTE_YUV422:
        viddev->v4l_bufsize = (width * height * 2);
        break;
    case VIDEO_PALETTE_RGB24:
        viddev->v4l_bufsize = (width * height * 3);
        break;
    case VIDEO_PALETTE_GREY:
        viddev->v4l_bufsize = width * height;
        break;
    }

    MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: HUE [%d]",
               get_hue(dev_bktr, &dummy));
    MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: SATURATION [%d]",
               get_saturation(dev_bktr, &dummy));
    MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: BRIGHTNESS [%d]",
               get_brightness(dev_bktr, &dummy));
    MOTION_LOG(NTC, TYPE_VIDEO, NO_ERRNO, "%s: CONTRAST [%d]",
               get_contrast(dev_bktr, &dummy));

    return map;
}
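The catchsignal() handler installed for SIGUSR2 above is not shown in this excerpt. In this pattern it is usually just an async-signal-safe flag setter; a sketch with a hypothetical flag name:

/* Sketch: flag set by the SIGUSR2 handler when frame data is ready (name is
 * illustrative; the real handler lives elsewhere in this file). */
static volatile sig_atomic_t bktr_frame_waiting;

static void catchsignal(int sig ATTRIBUTE_UNUSED)
{
    bktr_frame_waiting++;   /* A frame is ready to be read() from the driver. */
}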
/**
 * netcam_init_jpeg
 *
 * Initialises the JPEG library prior to doing a
 * decompression.
 *
 * Parameters:
 *      netcam          pointer to netcam_context.
 *      cinfo           pointer to JPEG decompression context.
 *
 * Returns:             Error code.
 */
static int netcam_init_jpeg(netcam_context_ptr netcam, j_decompress_ptr cinfo)
{
    netcam_buff_ptr buff;

    /*
     * First we check whether a new image has arrived. If not, we
     * setup to wait for 1/2 a frame time. This will (hopefully)
     * help in synchronizing the camera frames with the motion main
     * loop.
     */
    pthread_mutex_lock(&netcam->mutex);

    if (netcam->imgcnt_last == netcam->imgcnt) { /* Need to wait */
        struct timespec waittime;
        struct timeval curtime;
        int retcode;

        /*
         * We calculate the wait deadline (the timespec is in *nanoseconds*).
         * We will wait 0.5 seconds, which gives a practical minimum
         * framerate of 2, which is desired for the motion_loop to
         * function.
         */
        gettimeofday(&curtime, NULL);
        curtime.tv_usec += 500000;

        if (curtime.tv_usec >= 1000000) {
            curtime.tv_usec -= 1000000;
            curtime.tv_sec++;
        }

        waittime.tv_sec = curtime.tv_sec;
        waittime.tv_nsec = 1000L * curtime.tv_usec;

        do {
            retcode = pthread_cond_timedwait(&netcam->pic_ready,
                                             &netcam->mutex, &waittime);
        } while (retcode == EINTR);

        if (retcode) { /* We assume a non-zero reply is ETIMEOUT */
            pthread_mutex_unlock(&netcam->mutex);
            MOTION_LOG(WRN, TYPE_NETCAM, NO_ERRNO, "%s: no new pic, no signal rcvd");
            return NETCAM_GENERAL_ERROR | NETCAM_NOTHING_NEW_ERROR;
        }

        MOTION_LOG(INF, TYPE_NETCAM, NO_ERRNO, "%s: ***new pic delay successful***");
    }

    netcam->imgcnt_last = netcam->imgcnt;

    /* Set latest buffer as "current". */
    buff = netcam->latest;
    netcam->latest = netcam->jpegbuf;
    netcam->jpegbuf = buff;
    pthread_mutex_unlock(&netcam->mutex);

    /* Clear any error flag from previous work. */
    netcam->jpeg_error = 0;

    buff = netcam->jpegbuf;

    /*
     * Prepare for the decompression.
     * Initialize the JPEG decompression object.
     */
    jpeg_create_decompress(cinfo);

    /* Set up own error exit routine. */
    cinfo->err = jpeg_std_error(&netcam->jerr);
    cinfo->client_data = netcam;
    netcam->jerr.error_exit = netcam_error_exit;
    netcam->jerr.output_message = netcam_output_message;

    /* Specify the data source as our own routine. */
    netcam_memory_src(cinfo, buff->ptr, buff->used);

    /* Read file parameters (rejecting tables-only). */
    jpeg_read_header(cinfo, TRUE);

    /* Override the desired colour space. */
    cinfo->out_color_space = JCS_YCbCr;

    /* Start the decompressor. */
    jpeg_start_decompress(cinfo);

    MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO, "%s: jpeg_error %d", netcam->jpeg_error);

    return netcam->jpeg_error;
}
/**
 * get_oformat
 *      Obtains the output format used for the specified codec. For mpeg4 codecs,
 *      the format is avi; for mpeg1 codec, the format is mpeg. The filename has
 *      to be passed, because it gets the appropriate extension appended onto it.
 *
 * Returns
 *      AVOutputFormat pointer or NULL if any error happens.
 */
static AVOutputFormat *get_oformat(const char *codec, char *filename)
{
    const char *ext;
    AVOutputFormat *of = NULL;

    /*
     * Here, we use guess_format to automatically setup the codec information.
     * If we are using msmpeg4, manually set that codec here.
     * We also dynamically add the file extension to the filename here. This was
     * done to support both mpeg1 and mpeg4 codecs since they have different extensions.
     */
    if (strcmp(codec, "tlapse") == 0) {
        ext = ".swf";
        of = av_guess_format("swf", NULL, NULL);
        if (of)
            of->video_codec = MY_CODEC_ID_MPEG2VIDEO;
    } else if (strcmp(codec, "mpeg4") == 0) {
        ext = ".avi";
        of = av_guess_format("avi", NULL, NULL);
    } else if (strcmp(codec, "msmpeg4") == 0) {
        ext = ".avi";
        of = av_guess_format("avi", NULL, NULL);
        /* Manually override the codec id. */
        if (of)
            of->video_codec = MY_CODEC_ID_MSMPEG4V2;
    } else if (strcmp(codec, "swf") == 0) {
        ext = ".swf";
        of = av_guess_format("swf", NULL, NULL);
    } else if (strcmp(codec, "flv") == 0) {
        ext = ".flv";
        of = av_guess_format("flv", NULL, NULL);
        if (of)
            of->video_codec = MY_CODEC_ID_FLV1;
    } else if (strcmp(codec, "ffv1") == 0) {
        ext = ".avi";
        of = av_guess_format("avi", NULL, NULL);
        if (of)
            of->video_codec = MY_CODEC_ID_FFV1;
    } else if (strcmp(codec, "mov") == 0) {
        ext = ".mov";
        of = av_guess_format("mov", NULL, NULL);
    } else if (strcmp(codec, "ogg") == 0) {
        ext = ".ogg";
        of = av_guess_format("ogg", NULL, NULL);
    } else if (strcmp(codec, "mp4") == 0) {
        ext = ".mp4";
        of = av_guess_format("mp4", NULL, NULL);
        if (of)
            of->video_codec = MY_CODEC_ID_H264;
    } else if (strcmp(codec, "mkv") == 0) {
        ext = ".mkv";
        of = av_guess_format("matroska", NULL, NULL);
        if (of)
            of->video_codec = MY_CODEC_ID_H264;
    } else if (strcmp(codec, "hevc") == 0) {
        ext = ".mp4";
        of = av_guess_format("mp4", NULL, NULL);
        if (of)
            of->video_codec = MY_CODEC_ID_HEVC;
    } else {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: ffmpeg_video_codec option value"
                   " %s is not supported", codec);
        return NULL;
    }

    if (!of) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Could not guess format for %s",
                   codec);
        return NULL;
    }

    /* The 4 allows for ".avi" or ".mpg" to be appended. */
    strncat(filename, ext, 4);

    return of;
}
/**
 * netcam_image_conv
 *
 * Parameters:
 *      netcam          pointer to netcam_context
 *      cinfo           pointer to JPEG decompression context
 *      image           pointer to buffer of destination image (yuv420)
 *
 * Returns :    netcam->jpeg_error
 */
static int netcam_image_conv(netcam_context_ptr netcam,
                             struct jpeg_decompress_struct *cinfo,
                             unsigned char *image)
{
    JSAMPARRAY line;           /* Array of decomp data lines */
    unsigned char *wline;      /* Will point to line[0] */
    /* Working variables */
    int linesize, i;
    unsigned char *upic, *vpic;
    unsigned char *pic = image;
    unsigned char y;           /* Switch for decoding YUV data */
    unsigned int width, height;

    width = cinfo->output_width;
    height = cinfo->output_height;

    if (width && ((width != netcam->width) || (height != netcam->height))) {
        MOTION_LOG(WRN, TYPE_NETCAM, NO_ERRNO,
                   "%s: Expected image size %dx%d, but JPEG is %dx%d",
                   netcam->width, netcam->height, width, height);
        jpeg_destroy_decompress(cinfo);
        netcam->jpeg_error |= 4;
        return netcam->jpeg_error;
    }

    /* Set the output pointers (these follow from the YUV420P planar layout). */
    upic = pic + width * height;
    vpic = upic + (width * height) / 4;

    /* YCbCr format will give us one byte each for YUV. */
    linesize = cinfo->output_width * 3;

    /* Allocate space for one line. */
    line = (cinfo->mem->alloc_sarray)((j_common_ptr) cinfo, JPOOL_IMAGE,
                                      cinfo->output_width * cinfo->output_components, 1);

    wline = line[0];
    y = 0;

    while (cinfo->output_scanline < height) {
        jpeg_read_scanlines(cinfo, line, 1);

        for (i = 0; i < linesize; i += 3) {
            pic[i / 3] = wline[i];
            if (i & 1) {
                upic[(i / 3) / 2] = wline[i + 1];
                vpic[(i / 3) / 2] = wline[i + 2];
            }
        }

        pic += linesize / 3;

        if (y++ & 1) {
            upic += width / 2;
            vpic += width / 2;
        }
    }

    jpeg_finish_decompress(cinfo);
    jpeg_destroy_decompress(cinfo);

    if (netcam->cnt->rotate_data.degrees > 0)
        /* Rotate as specified */
        rotate_map(netcam->cnt, image);

    MOTION_LOG(DBG, TYPE_NETCAM, NO_ERRNO, "%s: jpeg_error %d", netcam->jpeg_error);

    return netcam->jpeg_error;
}
/**
 * ffmpeg_open
 *      Opens an mpeg file using the new libavformat method. Both mpeg1
 *      and mpeg4 are supported. However, if the current ffmpeg version doesn't
 *      allow mpeg1 with a non-standard framerate, the open will fail.
 *      Timelapse is a special case and is tested separately.
 *
 * Returns
 *      A newly allocated ffmpeg struct, or NULL if any error happens.
 */
struct ffmpeg *ffmpeg_open(const char *ffmpeg_video_codec, char *filename,
                           unsigned char *y, unsigned char *u, unsigned char *v,
                           int width, int height, int rate, int bps, int vbr,
                           int tlapse)
{
    AVCodecContext *c;
    AVCodec *codec;
    struct ffmpeg *ffmpeg;
    int ret;
    char errstr[128];
    AVDictionary *opts = NULL;

    /*
     * Allocate space for our ffmpeg structure. This structure contains all the
     * codec and image information we need to generate movies.
     */
    ffmpeg = mymalloc(sizeof(struct ffmpeg));
    memset(ffmpeg, 0, sizeof(struct ffmpeg));

    ffmpeg->vbr = vbr;
    ffmpeg->tlapse = tlapse;

    /* Store codec name in ffmpeg->codec, with buffer overflow check. */
    snprintf(ffmpeg->codec, sizeof(ffmpeg->codec), "%s", ffmpeg_video_codec);

    /* Allocate the output media context. */
    ffmpeg->oc = avformat_alloc_context();
    if (!ffmpeg->oc) {
        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                   "%s: Could not allocate output context");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set up the output format. */
    if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
        ffmpeg->oc->oformat = get_oformat("tlapse", filename);
    } else {
        ffmpeg->oc->oformat = get_oformat(ffmpeg_video_codec, filename);
    }
    if (!ffmpeg->oc->oformat) {
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    snprintf(ffmpeg->oc->filename, sizeof(ffmpeg->oc->filename), "%s", filename);

    /* Create a new video stream and initialize the codecs. */
    ffmpeg->video_st = NULL;
    if (ffmpeg->oc->oformat->video_codec != MY_CODEC_ID_NONE) {
        codec = avcodec_find_encoder(ffmpeg->oc->oformat->video_codec);
        if (!codec) {
            MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO,
                       "%s: Codec %s not found", ffmpeg_video_codec);
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }

        ffmpeg->video_st = avformat_new_stream(ffmpeg->oc, codec);
        if (!ffmpeg->video_st) {
            MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                       "%s: Could not alloc stream");
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }
    } else {
        /* We did not get a proper video codec. */
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Could not get the codec");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    ffmpeg->c = c = AVSTREAM_CODEC_PTR(ffmpeg->video_st);
    c->codec_id = ffmpeg->oc->oformat->video_codec;
    c->codec_type = AVMEDIA_TYPE_VIDEO;
    c->bit_rate = bps;
    c->width = width;
    c->height = height;
    c->time_base.num = 1;
    c->time_base.den = rate;
    c->gop_size = 12;
    c->pix_fmt = MY_PIX_FMT_YUV420P;
    c->max_b_frames = 0;

    if (c->codec_id == MY_CODEC_ID_H264 ||
        c->codec_id == MY_CODEC_ID_HEVC) {
        av_dict_set(&opts, "preset", "ultrafast", 0);
        av_dict_set(&opts, "crf", "18", 0);
        av_dict_set(&opts, "tune", "zerolatency", 0);
    }

    if (strcmp(ffmpeg_video_codec, "ffv1") == 0)
        c->strict_std_compliance = -2;

    if (vbr)
        c->flags |= CODEC_FLAG_QSCALE;

    /* Some formats want stream headers to be separate. */
    if (!strcmp(ffmpeg->oc->oformat->name, "mp4") ||
        !strcmp(ffmpeg->oc->oformat->name, "mov") ||
        !strcmp(ffmpeg->oc->oformat->name, "3gp")) {
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    /* Open the codec while holding the global lock. */
    pthread_mutex_lock(&global_lock);
    ret = avcodec_open2(c, codec, &opts);
    pthread_mutex_unlock(&global_lock);

    if (ret < 0) {
        if (codec->supported_framerates) {
            const AVRational *fps = codec->supported_framerates;
            while (fps->num) {
                MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,
                           "%s: Reported FPS supported %d/%d",
                           fps->num, fps->den);
                fps++;
            }
        }
        /* Retry with each framerate from 1 to 35 until one is accepted. */
        int chkrate = 1;
        pthread_mutex_lock(&global_lock);
        while ((chkrate < 36) && (ret != 0)) {
            c->time_base.den = chkrate;
            ret = avcodec_open2(c, codec, &opts);
            chkrate++;
        }
        pthread_mutex_unlock(&global_lock);
        if (ret < 0) {
            av_strerror(ret, errstr, sizeof(errstr));
            MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO,
                       "%s: Could not open codec %s", errstr);
            av_dict_free(&opts);
            ffmpeg_cleanups(ffmpeg);
            return NULL;
        }
    }
    av_dict_free(&opts);
    MOTION_LOG(NTC, TYPE_ENCODER, NO_ERRNO,
               "%s: Selected output FPS %d", c->time_base.den);

    ffmpeg->video_outbuf = NULL;
    if (!(ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE)) {
        ffmpeg->video_outbuf_size = ffmpeg->c->width * 512;
        ffmpeg->video_outbuf = mymalloc(ffmpeg->video_outbuf_size);
    }

    ffmpeg->picture = my_frame_alloc();
    if (!ffmpeg->picture) {
        MOTION_LOG(ERR, TYPE_ENCODER, NO_ERRNO, "%s: Could not alloc frame");
        ffmpeg_cleanups(ffmpeg);
        return NULL;
    }

    /* Set variable bitrate if requested. */
    if (ffmpeg->vbr)
        ffmpeg->picture->quality = ffmpeg->vbr;

    /* Set the frame data. */
    ffmpeg->picture->data[0] = y;
    ffmpeg->picture->data[1] = u;
    ffmpeg->picture->data[2] = v;
    ffmpeg->picture->linesize[0] = ffmpeg->c->width;
    ffmpeg->picture->linesize[1] = ffmpeg->c->width / 2;
    ffmpeg->picture->linesize[2] = ffmpeg->c->width / 2;

    /* Open the output file, if needed. */
    if ((access(filename, W_OK) == 0) || (ffmpeg->tlapse != TIMELAPSE_APPEND)) {
        if (!(ffmpeg->oc->oformat->flags & AVFMT_NOFILE)) {
            if (avio_open(&ffmpeg->oc->pb, filename, MY_FLAG_WRITE) < 0) {
                /* Path did not exist: create it and retry the open. */
                if (errno == ENOENT) {
                    if (create_path(filename) == -1) {
                        ffmpeg_cleanups(ffmpeg);
                        return NULL;
                    }
                    if (avio_open(&ffmpeg->oc->pb, filename, MY_FLAG_WRITE) < 0) {
                        MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                                   "%s: Error opening file %s", filename);
                        ffmpeg_cleanups(ffmpeg);
                        return NULL;
                    }
                /* Permission denied */
                } else if (errno == EACCES) {
                    MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                               "%s: Permission denied. %s", filename);
                    ffmpeg_cleanups(ffmpeg);
                    return NULL;
                } else {
                    MOTION_LOG(ERR, TYPE_ENCODER, SHOW_ERRNO,
                               "%s: Error opening file %s", filename);
                    ffmpeg_cleanups(ffmpeg);
                    return NULL;
                }
            }
        }

        /*
         * Write the stream header. For TIMELAPSE_APPEND we write the data
         * via standard file I/O, so we close the items here.
         */
        avformat_write_header(ffmpeg->oc, NULL);
        if (ffmpeg->tlapse == TIMELAPSE_APPEND) {
            av_write_trailer(ffmpeg->oc);
            avio_close(ffmpeg->oc->pb);
        }
    }

    return ffmpeg;
}
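/*
 * Minimal usage sketch, for illustration only: open an mpeg4 encoder over a
 * caller-owned YUV420P buffer at 25 fps and 400 kbit/s. TIMELAPSE_NONE is
 * assumed to be the "no timelapse" value used by this file's callers, and
 * the buffer sizing is the caller's responsibility.
 */
static struct ffmpeg *example_open_mpeg4(unsigned char *buf,
                                         int width, int height)
{
    char filename[] = "/tmp/example.avi";   /* Hypothetical output path. */

    /* Planar YUV420P layout inside one contiguous buffer. */
    unsigned char *y = buf;
    unsigned char *u = y + width * height;
    unsigned char *v = u + (width * height) / 4;

    return ffmpeg_open("mpeg4", filename, y, u, v,
                       width, height, 25, 400000, 0, TIMELAPSE_NONE);
}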
/**
 * ftp_connect
 *
 * Tries to open a control connection.
 *
 * Parameters:
 *
 *      netcam  pointer to netcam_context (the FTP context is netcam->ftp)
 *
 * Returns -1 in case of error, 0 otherwise.
 */
int ftp_connect(netcam_context_ptr netcam)
{
    ftp_context_pointer ctxt;
    struct hostent *hp;
    int port;
    int res;
    int addrlen = sizeof(struct sockaddr_in);

    if (netcam == NULL)
        return -1;

    ctxt = netcam->ftp;
    if (ctxt == NULL)
        return -1;

    if (netcam->connect_host == NULL)
        return -1;

    /* Do the blocking DNS query. */
    port = netcam->connect_port;
    if (port == 0)
        port = 21;

    memset(&ctxt->ftp_address, 0, sizeof(ctxt->ftp_address));

    hp = gethostbyname(netcam->connect_host);
    if (hp == NULL) {
        MOTION_LOG(ERR, TYPE_NETCAM, SHOW_ERRNO,
                   "%s: gethostbyname failed in ftp_connect");
        return -1;
    }

    if ((unsigned int) hp->h_length >
        sizeof(((struct sockaddr_in *)&ctxt->ftp_address)->sin_addr)) {
        MOTION_LOG(ERR, TYPE_NETCAM, SHOW_ERRNO,
                   "%s: gethostbyname address mismatch in ftp_connect");
        return -1;
    }

    /* Prepare the socket. */
    ((struct sockaddr_in *)&ctxt->ftp_address)->sin_family = AF_INET;
    memcpy(&((struct sockaddr_in *)&ctxt->ftp_address)->sin_addr,
           hp->h_addr_list[0], hp->h_length);
    ((struct sockaddr_in *)&ctxt->ftp_address)->sin_port =
        (u_short) htons((unsigned short) port);
    ctxt->control_file_desc = socket(AF_INET, SOCK_STREAM, 0);
    addrlen = sizeof(struct sockaddr_in);

    if (ctxt->control_file_desc < 0) {
        MOTION_LOG(ERR, TYPE_NETCAM, SHOW_ERRNO, "%s: socket failed");
        return -1;
    }

    /* Do the connect. */
    if (connect(ctxt->control_file_desc,
                (struct sockaddr *) &ctxt->ftp_address, addrlen) < 0) {
        MOTION_LOG(ERR, TYPE_NETCAM, SHOW_ERRNO,
                   "%s: Failed to create a connection");
        close(ctxt->control_file_desc);
        ctxt->control_file_desc = -1;
        return -1;
    }

    /* Wait for the HELLO from the server. */
    res = ftp_get_response(ctxt);
    if (res != 2) {
        close(ctxt->control_file_desc);
        ctxt->control_file_desc = -1;
        return -1;
    }

    /* Do the authentication. */
    res = ftp_send_user(ctxt);
    if (res < 0) {
        close(ctxt->control_file_desc);
        ctxt->control_file_desc = -1;
        return -1;
    }

    res = ftp_get_response(ctxt);
    switch (res) {
    case 2:
        return 0;
    case 3:
        break;
    case 1:
    case 4:
    case 5:
    case -1:
    default:
        close(ctxt->control_file_desc);
        ctxt->control_file_desc = -1;
        return -1;
    }

    res = ftp_send_passwd(ctxt);
    if (res < 0) {
        close(ctxt->control_file_desc);
        ctxt->control_file_desc = -1;
        return -1;
    }

    res = ftp_get_response(ctxt);
    switch (res) {
    case 2:
        break;
    case 3:
        MOTION_LOG(WRN, TYPE_NETCAM, NO_ERRNO,
                   "%s: FTP server asking for ACCT on anonymous");
        /* Fall through to the error cases: ACCT is not supported. */
    case 1:
    case 4:
    case 5:
    case -1:
    default:
        close(ctxt->control_file_desc);
        ctxt->control_file_desc = -1;
        return -1;
    }

    return 0;
}
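/*
 * For reference, the switch statements above key on the first digit of the
 * three-digit FTP reply code (RFC 959): 2xx = completed, 3xx = more input
 * needed (e.g. "331 Password required"), 4xx/5xx = failure. A minimal
 * sketch of that classification, assuming the reply line starts with the
 * code (the actual parsing in ftp_get_response may differ):
 */
static int ftp_reply_class(const char *reply)
{
    /* Returns the leading digit, or -1 when the line has no valid code. */
    if (reply == NULL || reply[0] < '1' || reply[0] > '5')
        return -1;
    return reply[0] - '0';
}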
/**
 * jpgutl_decode_jpeg
 *      Purpose: Decompress the jpeg data_in into the img_out buffer.
 *
 * Parameters:
 *      jpeg_data_in     The jpeg data sent in
 *      jpeg_data_len    The length of the jpeg data
 *      width            The width of the image
 *      height           The height of the image
 *      img_out          Pointer to the image output
 *
 * Return Values
 *      Success 0, Failure -1
 */
int jpgutl_decode_jpeg(unsigned char *jpeg_data_in, int jpeg_data_len,
                       unsigned int width, unsigned int height,
                       unsigned char *volatile img_out)
{
    JSAMPARRAY line;            /* Array of decomp data lines */
    unsigned char *wline;       /* Will point to line[0] */
    unsigned int i;
    unsigned char *img_y, *img_cb, *img_cr;
    unsigned char offset_y;

    struct jpeg_decompress_struct dinfo;
    struct jpgutl_error_mgr jerr;

    /* We set up the normal JPEG error routines, then override error_exit. */
    dinfo.err = jpeg_std_error(&jerr.pub);
    jerr.pub.error_exit = jpgutl_error_exit;
    /* Also hook the emit_message routine to note corrupt-data warnings. */
    jerr.original_emit_message = jerr.pub.emit_message;
    jerr.pub.emit_message = jpgutl_emit_message;
    jerr.warning_seen = 0;

    jpeg_create_decompress(&dinfo);

    /* Establish the setjmp return context for jpgutl_error_exit to use. */
    if (setjmp(jerr.setjmp_buffer)) {
        /* If we get here, the JPEG code has signaled an error. */
        jpeg_destroy_decompress(&dinfo);
        return -1;
    }

    jpgutl_buffer_src(&dinfo, jpeg_data_in, jpeg_data_len);

    jpeg_read_header(&dinfo, TRUE);

    /* 420 sampling is the default for YCbCr, so no need to override. */
    dinfo.out_color_space = JCS_YCbCr;
    dinfo.dct_method = JDCT_DEFAULT;
    guarantee_huff_tables(&dinfo);  /* Required by older versions of the jpeg libs */
    jpeg_start_decompress(&dinfo);

    if ((dinfo.output_width == 0) || (dinfo.output_height == 0)) {
        MOTION_LOG(WRN, TYPE_VIDEO, NO_ERRNO, _("Invalid JPEG image dimensions"));
        jpeg_destroy_decompress(&dinfo);
        return -1;
    }

    if ((dinfo.output_width != width) || (dinfo.output_height != height)) {
        MOTION_LOG(WRN, TYPE_VIDEO, NO_ERRNO,
                   _("JPEG image size %dx%d, JPEG was %dx%d"),
                   width, height, dinfo.output_width, dinfo.output_height);
        jpeg_destroy_decompress(&dinfo);
        return -1;
    }

    img_y = img_out;
    img_cb = img_y + dinfo.output_width * dinfo.output_height;
    img_cr = img_cb + (dinfo.output_width * dinfo.output_height) / 4;

    /* Allocate space for one line. */
    line = (*dinfo.mem->alloc_sarray)((j_common_ptr) &dinfo, JPOOL_IMAGE,
                                      dinfo.output_width *
                                      dinfo.output_components, 1);

    wline = line[0];
    offset_y = 0;

    while (dinfo.output_scanline < dinfo.output_height) {
        jpeg_read_scanlines(&dinfo, line, 1);

        for (i = 0; i < (dinfo.output_width * 3); i += 3) {
            img_y[i / 3] = wline[i];
            if (i & 1) {
                img_cb[(i / 3) / 2] = wline[i + 1];
                img_cr[(i / 3) / 2] = wline[i + 2];
            }
        }

        img_y += dinfo.output_width;

        if (offset_y++ & 1) {
            img_cb += dinfo.output_width / 2;
            img_cr += dinfo.output_width / 2;
        }
    }

    jpeg_finish_decompress(&dinfo);
    jpeg_destroy_decompress(&dinfo);

    /*
     * If there are too many warnings, this means that
     * only a partial image could be returned which would
     * trigger many false positive motion detections.
     */
    if (jerr.warning_seen > 2)
        return -1;

    return 0;
}
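/*
 * Usage sketch, for illustration only: decode a JPEG byte buffer into a
 * caller-allocated YUV420P image. The 3/2 sizing follows from the planar
 * layout used above (full-resolution Y plus quarter-resolution Cb and Cr).
 * The mymalloc()/free() pairing here is illustrative, not prescriptive.
 */
static unsigned char *example_decode(unsigned char *jpeg, int jpeg_len,
                                     unsigned int width, unsigned int height)
{
    unsigned char *img = mymalloc((width * height * 3) / 2);

    if (jpgutl_decode_jpeg(jpeg, jpeg_len, width, height, img) != 0) {
        free(img);
        return NULL;  /* Decode failed or the image was too corrupt. */
    }

    return img;
}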