static av_cold int color_init(AVFilterContext *ctx, const char *args)
{
    ColorContext *color = ctx->priv;
    char color_string[128] = "black";
    char frame_size[128]   = "320x240";
    char frame_rate[128]   = "25";
    AVRational frame_rate_q;
    int ret;

    if (args)
        sscanf(args, "%127[^:]:%127[^:]:%127s",
               color_string, frame_size, frame_rate);

    if (av_parse_video_size(&color->w, &color->h, frame_size) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frame size: %s\n", frame_size);
        return AVERROR(EINVAL);
    }

    if (av_parse_video_rate(&frame_rate_q, frame_rate) < 0 ||
        frame_rate_q.den <= 0 || frame_rate_q.num <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", frame_rate);
        return AVERROR(EINVAL);
    }
    color->time_base.num = frame_rate_q.den;
    color->time_base.den = frame_rate_q.num;

    if ((ret = av_parse_color(color->color, color_string, -1, ctx)) < 0)
        return ret;

    return 0;
}
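The parsing helpers used above come from libavutil/parseutils.h and can be exercised outside of a filter. The following is a minimal, self-contained sketch (the string values are made up for illustration; compile and link against libavutil) showing how a size string and a rate string are parsed and how the frame rate is inverted into a time base, as color_init does:

#include <stdio.h>
#include <libavutil/parseutils.h>
#include <libavutil/rational.h>

int main(void)
{
    int w, h;
    AVRational rate, time_base;

    /* "WxH" strings and named abbreviations are both accepted */
    if (av_parse_video_size(&w, &h, "320x240") < 0)
        return 1;

    /* rate strings may be integers, fractions ("30000/1001") or names ("ntsc") */
    if (av_parse_video_rate(&rate, "25") < 0)
        return 1;

    /* a source filter's time base is the inverse of its frame rate */
    time_base = av_inv_q(rate);

    printf("%dx%d @ %d/%d fps, time_base %d/%d\n",
           w, h, rate.num, rate.den, time_base.num, time_base.den);
    return 0;
}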
#include <stdio.h>
#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
#include <libavutil/parseutils.h>

int main(int argc, const char *argv[])
{
    const char *uri = "?cafile=file1";
    char buf[1024];
    char *ca_file = NULL;
    const char *p = strchr(uri, '?');

    if (!p)
        return 0;

    /* extract the value of the "cafile" query parameter, if present */
    if (av_find_info_tag(buf, sizeof(buf), "cafile", p))
        ca_file = av_strdup(buf);
    if (ca_file)
        printf("%s\n", ca_file);
    av_freep(&ca_file);

    /* parse a named canvas size ("qntsc" is a predefined abbreviation);
     * a stack AVCodecContext is acceptable for this toy example */
    AVCodecContext context = { 0 };
    AVCodecContext *dec = &context;
    const char *canvas_size = "qntsc";
    if (canvas_size &&
        av_parse_video_size(&dec->width, &dec->height, canvas_size) < 0) {
        fprintf(stderr, "Invalid canvas size: %s\n", canvas_size);
        return 1;
    }

    return 0;
}
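The "qntsc" string above works because av_parse_video_size() accepts a set of named size abbreviations in addition to explicit "WxH" strings. A small sketch (abbreviation names taken from the libavutil documentation) that prints what a few of them resolve to:

#include <stdio.h>
#include <libavutil/parseutils.h>

int main(void)
{
    const char *sizes[] = { "qntsc", "cif", "vga", "hd720", "1920x1080" };

    for (int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        int w, h;
        if (av_parse_video_size(&w, &h, sizes[i]) < 0) {
            fprintf(stderr, "could not parse '%s'\n", sizes[i]);
            continue;
        }
        printf("%-9s -> %dx%d\n", sizes[i], w, h);
    }
    return 0;
}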
static av_cold int source_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    Frei0rContext *frei0r = ctx->priv;
    char dl_name[1024], c;
    char frame_size[128] = "";
    char frame_rate[128] = "";
    AVRational frame_rate_q;

    memset(frei0r->params, 0, sizeof(frei0r->params));

    if (args)
        sscanf(args, "%127[^:]:%127[^:]:%1023[^:=]%c%255c",
               frame_size, frame_rate, dl_name, &c, frei0r->params);

    if (av_parse_video_size(&frei0r->w, &frei0r->h, frame_size) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'\n", frame_size);
        return AVERROR(EINVAL);
    }

    if (av_parse_video_rate(&frame_rate_q, frame_rate) < 0 ||
        frame_rate_q.den <= 0 || frame_rate_q.num <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'\n", frame_rate);
        return AVERROR(EINVAL);
    }
    frei0r->time_base.num = frame_rate_q.den;
    frei0r->time_base.den = frame_rate_q.num;

    return frei0r_init(ctx, dl_name, F0R_PLUGIN_TYPE_SOURCE);
}
static int rawvideo_read_header(AVFormatContext *ctx)
{
    RawVideoDemuxerContext *s = ctx->priv_data;
    int width = 0, height = 0, ret = 0;
    enum AVPixelFormat pix_fmt;
    AVRational framerate;
    AVStream *st;

    st = avformat_new_stream(ctx, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = ctx->iformat->raw_codec_id;

    if (s->video_size &&
        (ret = av_parse_video_size(&width, &height, s->video_size)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Couldn't parse video size.\n");
        return ret;
    }

    if ((pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR, "No such pixel format: %s.\n",
               s->pixel_format);
        return AVERROR(EINVAL);
    }

    if ((ret = av_parse_video_rate(&framerate, s->framerate)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Could not parse framerate: %s.\n",
               s->framerate);
        return ret;
    }

    avpriv_set_pts_info(st, 64, framerate.den, framerate.num);

    st->codec->width    = width;
    st->codec->height   = height;
    st->codec->pix_fmt  = pix_fmt;
    st->codec->bit_rate = av_rescale_q(avpicture_get_size(st->codec->pix_fmt, width, height),
                                       (AVRational){8,1}, st->time_base);

    return 0;
}
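The bit-rate computation in the last lines can be sanity-checked in isolation. A minimal sketch, assuming 320x240 yuv420p at 25 fps (typical raw-video demuxer defaults) and using the non-deprecated av_image_get_buffer_size() in place of avpicture_get_size():

#include <inttypes.h>
#include <stdio.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

int main(void)
{
    int width = 320, height = 240;
    AVRational framerate = { 25, 1 };
    AVRational time_base = { framerate.den, framerate.num };  /* 1/25 s per tick */

    /* bytes needed for one yuv420p frame (align = 1): 320*240*3/2 = 115200 */
    int frame_bytes = av_image_get_buffer_size(AV_PIX_FMT_YUV420P,
                                               width, height, 1);

    /* same rescale as the demuxer: bits per frame divided by the frame
     * duration (the time base) gives bits per second */
    int64_t bit_rate = av_rescale_q(frame_bytes, (AVRational){ 8, 1 }, time_base);

    printf("%d bytes/frame -> %"PRId64" bit/s\n", frame_bytes, bit_rate);
    return 0;
}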
static av_cold int source_init(AVFilterContext *ctx)
{
    Frei0rContext *s = ctx->priv;
    AVRational frame_rate_q;

    if (av_parse_video_size(&s->w, &s->h, s->size) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frame size: '%s'.\n", s->size);
        return AVERROR(EINVAL);
    }

    if (av_parse_video_rate(&frame_rate_q, s->framerate) < 0 ||
        frame_rate_q.den <= 0 || frame_rate_q.num <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: '%s'.\n", s->framerate);
        return AVERROR(EINVAL);
    }
    s->time_base.num = frame_rate_q.den;
    s->time_base.den = frame_rate_q.num;

    return frei0r_init(ctx, s->dl_name, F0R_PLUGIN_TYPE_SOURCE);
}
/* Opens the source YUV file and the destination file; relies on the
 * file-scope variables dst_width, dst_height, dst_size, dst_filename,
 * src_file and dst_file being defined elsewhere in the program. */
void open_yuvfile(const char *filename)
{
    if (av_parse_video_size(&dst_width, &dst_height, dst_size) < 0) {
        fprintf(stderr, "Invalid size: '%s'\n", dst_size);
        exit(1);
    }

    src_file = fopen(filename, "rb");
    if (!src_file) {
        fprintf(stderr, "Could not open source file %s\n", filename);
        exit(1);
    }

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }
}
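The fragment above keeps its configuration in file-scope variables. A hypothetical set of declarations and a call site that would let it compile could look like the sketch below (the variable names mirror the fragment, the concrete values are placeholders, and open_yuvfile() from the snippet above is assumed to live in the same translation unit):

#include <stdio.h>
#include <stdlib.h>
#include <libavutil/parseutils.h>

static int dst_width, dst_height;
static const char *dst_size     = "640x480";   /* placeholder size string */
static const char *dst_filename = "out.yuv";   /* placeholder output path */
static FILE *src_file, *dst_file;

void open_yuvfile(const char *filename);       /* defined in the snippet above */

int main(int argc, char **argv)
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <input.yuv>\n", argv[0]);
        return 1;
    }
    open_yuvfile(argv[1]);
    /* ... read from src_file, process, write to dst_file ... */
    fclose(src_file);
    fclose(dst_file);
    return 0;
}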
static av_cold int color_init(AVFilterContext *ctx)
{
    ColorContext *color = ctx->priv;
    AVRational frame_rate_q;
    int ret;

    if (av_parse_video_size(&color->w, &color->h, color->size_str) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frame size: %s\n", color->size_str);
        return AVERROR(EINVAL);
    }

    if (av_parse_video_rate(&frame_rate_q, color->framerate_str) < 0 ||
        frame_rate_q.den <= 0 || frame_rate_q.num <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frame rate: %s\n", color->framerate_str);
        return AVERROR(EINVAL);
    }
    color->time_base.num = frame_rate_q.den;
    color->time_base.den = frame_rate_q.num;

    if ((ret = av_parse_color(color->color, color->color_str, -1, ctx)) < 0)
        return ret;

    return 0;
}
static int vfw_read_header(AVFormatContext *s) { struct vfw_ctx *ctx = s->priv_data; AVCodecContext *codec; AVStream *st; int devnum; int bisize; BITMAPINFO *bi = NULL; CAPTUREPARMS cparms; DWORD biCompression; WORD biBitCount; int ret; AVRational framerate_q; if (!strcmp(s->filename, "list")) { for (devnum = 0; devnum <= 9; devnum++) { char driver_name[256]; char driver_ver[256]; ret = capGetDriverDescription(devnum, driver_name, sizeof(driver_name), driver_ver, sizeof(driver_ver)); if (ret) { av_log(s, AV_LOG_INFO, "Driver %d\n", devnum); av_log(s, AV_LOG_INFO, " %s\n", driver_name); av_log(s, AV_LOG_INFO, " %s\n", driver_ver); } } return AVERROR(EIO); } ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0); if(!ctx->hwnd) { av_log(s, AV_LOG_ERROR, "Could not create capture window.\n"); return AVERROR(EIO); } /* If atoi fails, devnum==0 and the default device is used */ devnum = atoi(s->filename); ret = SendMessage(ctx->hwnd, WM_CAP_DRIVER_CONNECT, devnum, 0); if(!ret) { av_log(s, AV_LOG_ERROR, "Could not connect to device.\n"); DestroyWindow(ctx->hwnd); return AVERROR(ENODEV); } SendMessage(ctx->hwnd, WM_CAP_SET_OVERLAY, 0, 0); SendMessage(ctx->hwnd, WM_CAP_SET_PREVIEW, 0, 0); ret = SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0, (LPARAM) videostream_cb); if(!ret) { av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n"); goto fail; } SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) s); st = avformat_new_stream(s, NULL); if(!st) { vfw_read_close(s); return AVERROR(ENOMEM); } /* Set video format */ bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0); if(!bisize) goto fail; bi = av_malloc(bisize); if(!bi) { vfw_read_close(s); return AVERROR(ENOMEM); } ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi); if(!ret) goto fail; dump_bih(s, &bi->bmiHeader); ret = av_parse_video_rate(&framerate_q, ctx->framerate); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate); goto fail; } if (ctx->video_size) { ret = av_parse_video_size(&bi->bmiHeader.biWidth, &bi->bmiHeader.biHeight, ctx->video_size); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n"); goto fail; } } if (0) { /* For testing yet unsupported compressions * Copy these values from user-supplied verbose information */ bi->bmiHeader.biWidth = 320; bi->bmiHeader.biHeight = 240; bi->bmiHeader.biPlanes = 1; bi->bmiHeader.biBitCount = 12; bi->bmiHeader.biCompression = MKTAG('I','4','2','0'); bi->bmiHeader.biSizeImage = 115200; dump_bih(s, &bi->bmiHeader); } ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi); if(!ret) { av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n"); goto fail; } biCompression = bi->bmiHeader.biCompression; biBitCount = bi->bmiHeader.biBitCount; /* Set sequence setup */ ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms), (LPARAM) &cparms); if(!ret) goto fail; dump_captureparms(s, &cparms); cparms.fYield = 1; // Spawn a background thread cparms.dwRequestMicroSecPerFrame = (framerate_q.den*1000000) / framerate_q.num; cparms.fAbortLeftMouse = 0; cparms.fAbortRightMouse = 0; cparms.fCaptureAudio = 0; cparms.vKeyAbort = 0; ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms), (LPARAM) &cparms); if(!ret) goto fail; codec = st->codec; codec->time_base = av_inv_q(framerate_q); codec->codec_type = AVMEDIA_TYPE_VIDEO; codec->width = bi->bmiHeader.biWidth; codec->height = bi->bmiHeader.biHeight; codec->pix_fmt = vfw_pixfmt(biCompression, 
biBitCount); if(codec->pix_fmt == AV_PIX_FMT_NONE) { codec->codec_id = vfw_codecid(biCompression); if(codec->codec_id == AV_CODEC_ID_NONE) { av_log(s, AV_LOG_ERROR, "Unknown compression type. " "Please report verbose (-v 9) debug information.\n"); vfw_read_close(s); return AVERROR_PATCHWELCOME; } codec->bits_per_coded_sample = biBitCount; } else { codec->codec_id = AV_CODEC_ID_RAWVIDEO; if(biCompression == BI_RGB) { codec->bits_per_coded_sample = biBitCount; codec->extradata = av_malloc(9 + AV_INPUT_BUFFER_PADDING_SIZE); if (codec->extradata) { codec->extradata_size = 9; memcpy(codec->extradata, "BottomUp", 9); } } } av_freep(&bi); avpriv_set_pts_info(st, 32, 1, 1000); ctx->mutex = CreateMutex(NULL, 0, NULL); if(!ctx->mutex) { av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" ); goto fail; } ctx->event = CreateEvent(NULL, 1, 0, NULL); if(!ctx->event) { av_log(s, AV_LOG_ERROR, "Could not create Event.\n" ); goto fail; } ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0); if(!ret) { av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" ); goto fail; } return 0; fail: av_freep(&bi); vfw_read_close(s); return AVERROR(EIO); }
/** * Initialize the x11 grab device demuxer (public device demuxer API). * * @param s1 Context from avformat core * @return <ul> * <li>AVERROR(ENOMEM) no memory left</li> * <li>AVERROR(EIO) other failure case</li> * <li>0 success</li> * </ul> */ static int x11grab_read_header(AVFormatContext *s1) { struct x11_grab *x11grab = s1->priv_data; Display *dpy; AVStream *st = NULL; enum PixelFormat input_pixfmt; XImage *image; int x_off = 0; int y_off = 0; int screen; int use_shm; char *dpyname, *offset; int ret = 0; AVRational framerate; dpyname = av_strdup(s1->filename); offset = strchr(dpyname, '+'); if (offset) { sscanf(offset, "%d,%d", &x_off, &y_off); x11grab->draw_mouse = !strstr(offset, "nomouse"); *offset= 0; } if ((ret = av_parse_video_size(&x11grab->width, &x11grab->height, x11grab->video_size)) < 0) { av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n"); goto out; } if ((ret = av_parse_video_rate(&framerate, x11grab->framerate)) < 0) { av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate); goto out; } av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n", s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height); dpy = XOpenDisplay(dpyname); av_freep(&dpyname); if(!dpy) { av_log(s1, AV_LOG_ERROR, "Could not open X display.\n"); ret = AVERROR(EIO); goto out; } st = avformat_new_stream(s1, NULL); if (!st) { ret = AVERROR(ENOMEM); goto out; } avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ screen = DefaultScreen(dpy); if (x11grab->follow_mouse) { int screen_w, screen_h; Window w; screen_w = DisplayWidth(dpy, screen); screen_h = DisplayHeight(dpy, screen); XQueryPointer(dpy, RootWindow(dpy, screen), &w, &w, &x_off, &y_off, &ret, &ret, &ret); x_off -= x11grab->width / 2; y_off -= x11grab->height / 2; x_off = FFMIN(FFMAX(x_off, 0), screen_w - x11grab->width); y_off = FFMIN(FFMAX(y_off, 0), screen_h - x11grab->height); av_log(s1, AV_LOG_INFO, "followmouse is enabled, resetting grabbing region to x: %d y: %d\n", x_off, y_off); } use_shm = XShmQueryExtension(dpy); av_log(s1, AV_LOG_INFO, "shared memory extension%s found\n", use_shm ? 
"" : " not"); if(use_shm) { int scr = XDefaultScreen(dpy); image = XShmCreateImage(dpy, DefaultVisual(dpy, scr), DefaultDepth(dpy, scr), ZPixmap, NULL, &x11grab->shminfo, x11grab->width, x11grab->height); x11grab->shminfo.shmid = shmget(IPC_PRIVATE, image->bytes_per_line * image->height, IPC_CREAT|0777); if (x11grab->shminfo.shmid == -1) { av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n"); ret = AVERROR(ENOMEM); goto out; } x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0); x11grab->shminfo.readOnly = False; if (!XShmAttach(dpy, &x11grab->shminfo)) { av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n"); /* needs some better error subroutine :) */ ret = AVERROR(EIO); goto out; } } else { image = XGetImage(dpy, RootWindow(dpy, screen), x_off,y_off, x11grab->width, x11grab->height, AllPlanes, ZPixmap); } switch (image->bits_per_pixel) { case 8: av_log (s1, AV_LOG_DEBUG, "8 bit palette\n"); input_pixfmt = PIX_FMT_PAL8; break; case 16: if ( image->red_mask == 0xf800 && image->green_mask == 0x07e0 && image->blue_mask == 0x001f ) { av_log (s1, AV_LOG_DEBUG, "16 bit RGB565\n"); input_pixfmt = PIX_FMT_RGB565; } else if (image->red_mask == 0x7c00 && image->green_mask == 0x03e0 && image->blue_mask == 0x001f ) { av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n"); input_pixfmt = PIX_FMT_RGB555; } else { av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel); av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask); ret = AVERROR(EIO); goto out; } break; case 24: if ( image->red_mask == 0xff0000 && image->green_mask == 0x00ff00 && image->blue_mask == 0x0000ff ) { input_pixfmt = PIX_FMT_BGR24; } else if ( image->red_mask == 0x0000ff && image->green_mask == 0x00ff00 && image->blue_mask == 0xff0000 ) { input_pixfmt = PIX_FMT_RGB24; } else { av_log(s1, AV_LOG_ERROR,"rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel); av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask); ret = AVERROR(EIO); goto out; } break; case 32: input_pixfmt = PIX_FMT_0RGB32; break; default: av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel); ret = AVERROR(EINVAL); goto out; } x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8; x11grab->dpy = dpy; x11grab->time_base = (AVRational) { framerate.den, framerate.num }; x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base); x11grab->x_off = x_off; x11grab->y_off = y_off; x11grab->image = image; x11grab->use_shm = use_shm; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_RAWVIDEO; st->codec->width = x11grab->width; st->codec->height = x11grab->height; st->codec->pix_fmt = input_pixfmt; st->codec->time_base = x11grab->time_base; st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(x11grab->time_base) * 8; out: return ret; }
static int dshow_read_header(AVFormatContext *avctx, AVFormatParameters *ap) { struct dshow_ctx *ctx = avctx->priv_data; IGraphBuilder *graph = NULL; ICreateDevEnum *devenum = NULL; IMediaControl *control = NULL; int ret = AVERROR(EIO); int r; if (!ctx->list_devices && !parse_device_name(avctx)) { av_log(avctx, AV_LOG_ERROR, "Malformed dshow input string.\n"); goto error; } if (ctx->video_size) { r = av_parse_video_size(&ctx->requested_width, &ctx->requested_height, ctx->video_size); if (r < 0) { av_log(avctx, AV_LOG_ERROR, "Could not parse video size '%s'.\n", ctx->video_size); goto error; } } if (ctx->framerate) { r = av_parse_video_rate(&ctx->requested_framerate, ctx->framerate); if (r < 0) { av_log(avctx, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate); goto error; } } CoInitialize(0); r = CoCreateInstance(&CLSID_FilterGraph, NULL, CLSCTX_INPROC_SERVER, &IID_IGraphBuilder, (void **) &graph); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, "Could not create capture graph.\n"); goto error; } ctx->graph = graph; r = CoCreateInstance(&CLSID_SystemDeviceEnum, NULL, CLSCTX_INPROC_SERVER, &IID_ICreateDevEnum, (void **) &devenum); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, "Could not enumerate system devices.\n"); goto error; } if (ctx->list_devices) { av_log(avctx, AV_LOG_INFO, "DirectShow video devices\n"); dshow_cycle_devices(avctx, devenum, VideoDevice, NULL); av_log(avctx, AV_LOG_INFO, "DirectShow audio devices\n"); dshow_cycle_devices(avctx, devenum, AudioDevice, NULL); ret = AVERROR_EXIT; goto error; } if (ctx->list_options) { if (ctx->device_name[VideoDevice]) dshow_list_device_options(avctx, devenum, VideoDevice); if (ctx->device_name[AudioDevice]) dshow_list_device_options(avctx, devenum, AudioDevice); ret = AVERROR_EXIT; goto error; } if (ctx->device_name[VideoDevice]) { ret = dshow_open_device(avctx, devenum, VideoDevice); if (ret < 0) goto error; ret = dshow_add_device(avctx, ap, VideoDevice); if (ret < 0) goto error; } if (ctx->device_name[AudioDevice]) { ret = dshow_open_device(avctx, devenum, AudioDevice); if (ret < 0) goto error; ret = dshow_add_device(avctx, ap, AudioDevice); if (ret < 0) goto error; } ctx->mutex = CreateMutex(NULL, 0, NULL); if (!ctx->mutex) { av_log(avctx, AV_LOG_ERROR, "Could not create Mutex\n"); goto error; } ctx->event = CreateEvent(NULL, 1, 0, NULL); if (!ctx->event) { av_log(avctx, AV_LOG_ERROR, "Could not create Event\n"); goto error; } r = IGraphBuilder_QueryInterface(graph, &IID_IMediaControl, (void **) &control); if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, "Could not get media control.\n"); goto error; } ctx->control = control; r = IMediaControl_Run(control); if (r == S_FALSE) { OAFilterState pfs; r = IMediaControl_GetState(control, 0, &pfs); } if (r != S_OK) { av_log(avctx, AV_LOG_ERROR, "Could not run filter\n"); goto error; } ret = 0; error: if (ret < 0) dshow_read_close(avctx); if (devenum) ICreateDevEnum_Release(devenum); return ret; }
/** * Initialize the x11 grab device demuxer (public device demuxer API). * * @param s1 Context from avformat core * @param ap Parameters from avformat core * @return <ul> * <li>AVERROR(ENOMEM) no memory left</li> * <li>AVERROR(EIO) other failure case</li> * <li>0 success</li> * </ul> */ static int x11grab_read_header(AVFormatContext *s1, AVFormatParameters *ap) { struct x11_grab *x11grab = s1->priv_data; Display *dpy; AVStream *st = NULL; enum PixelFormat input_pixfmt; XImage *image; int x_off = 0; int y_off = 0; int use_shm; char *param, *offset; int ret = 0; AVRational framerate; param = av_strdup(s1->filename); offset = strchr(param, '+'); if (offset) { sscanf(offset, "%d,%d", &x_off, &y_off); x11grab->nomouse= strstr(offset, "nomouse"); *offset= 0; } if ((ret = av_parse_video_size(&x11grab->width, &x11grab->height, x11grab->video_size)) < 0) { av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n"); goto out; } if ((ret = av_parse_video_rate(&framerate, x11grab->framerate)) < 0) { av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate); goto out; } #if FF_API_FORMAT_PARAMETERS if (ap->width > 0) x11grab->width = ap->width; if (ap->height > 0) x11grab->height = ap->height; if (ap->time_base.num) framerate = (AVRational){ap->time_base.den, ap->time_base.num}; #endif av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n", s1->filename, param, x_off, y_off, x11grab->width, x11grab->height); dpy = XOpenDisplay(param); if(!dpy) { av_log(s1, AV_LOG_ERROR, "Could not open X display.\n"); ret = AVERROR(EIO); goto out; } st = av_new_stream(s1, 0); if (!st) { ret = AVERROR(ENOMEM); goto out; } av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ use_shm = XShmQueryExtension(dpy); av_log(s1, AV_LOG_INFO, "shared memory extension %s found\n", use_shm ? "" : "not"); if(use_shm) { int scr = XDefaultScreen(dpy); image = XShmCreateImage(dpy, DefaultVisual(dpy, scr), DefaultDepth(dpy, scr), ZPixmap, NULL, &x11grab->shminfo, x11grab->width, x11grab->height); x11grab->shminfo.shmid = shmget(IPC_PRIVATE, image->bytes_per_line * image->height, IPC_CREAT|0777); if (x11grab->shminfo.shmid == -1) { av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n"); ret = AVERROR(ENOMEM); goto out; } x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0); x11grab->shminfo.readOnly = False; if (!XShmAttach(dpy, &x11grab->shminfo)) { av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n"); /* needs some better error subroutine :) */ ret = AVERROR(EIO); goto out; } } else { image = XGetImage(dpy, RootWindow(dpy, DefaultScreen(dpy)), x_off,y_off, x11grab->width, x11grab->height, AllPlanes, ZPixmap); } switch (image->bits_per_pixel) { case 8: av_log (s1, AV_LOG_DEBUG, "8 bit palette\n"); input_pixfmt = PIX_FMT_PAL8; break; case 16: if ( image->red_mask == 0xf800 && image->green_mask == 0x07e0 && image->blue_mask == 0x001f ) { av_log (s1, AV_LOG_DEBUG, "16 bit RGB565\n"); input_pixfmt = PIX_FMT_RGB565; } else if (image->red_mask == 0x7c00 && image->green_mask == 0x03e0 && image->blue_mask == 0x001f ) { av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n"); input_pixfmt = PIX_FMT_RGB555; } else { av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... 
aborting\n", image->bits_per_pixel); av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask); ret = AVERROR(EIO); goto out; } break; case 24: if ( image->red_mask == 0xff0000 && image->green_mask == 0x00ff00 && image->blue_mask == 0x0000ff ) { input_pixfmt = PIX_FMT_BGR24; } else if ( image->red_mask == 0x0000ff && image->green_mask == 0x00ff00 && image->blue_mask == 0xff0000 ) { input_pixfmt = PIX_FMT_RGB24; } else { av_log(s1, AV_LOG_ERROR,"rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel); av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask); ret = AVERROR(EIO); goto out; } break; case 32: #if 0 GetColorInfo (image, &c_info); if ( c_info.alpha_mask == 0xff000000 && image->green_mask == 0x0000ff00) { /* byte order is relevant here, not endianness * endianness is handled by avcodec, but atm no such thing * as having ABGR, instead of ARGB in a word. Since we * need this for Solaris/SPARC, but need to do the conversion * for every frame we do it outside of this loop, cf. below * this matches both ARGB32 and ABGR32 */ input_pixfmt = PIX_FMT_ARGB32; } else { av_log(s1, AV_LOG_ERROR,"image depth %i not supported ... aborting\n", image->bits_per_pixel); return AVERROR(EIO); } #endif input_pixfmt = PIX_FMT_RGB32; break; default: av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel); ret = AVERROR(EINVAL); goto out; } x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8; x11grab->dpy = dpy; x11grab->time_base = (AVRational){framerate.den, framerate.num}; x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base); x11grab->x_off = x_off; x11grab->y_off = y_off; x11grab->image = image; x11grab->use_shm = use_shm; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = CODEC_ID_RAWVIDEO; st->codec->width = x11grab->width; st->codec->height = x11grab->height; st->codec->pix_fmt = input_pixfmt; st->codec->time_base = x11grab->time_base; st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(x11grab->time_base) * 8; out: return ret; }
/* raw input */ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap) { AVStream *st; enum CodecID id; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); id = s->iformat->value; if (id == CODEC_ID_RAWVIDEO) { st->codec->codec_type = AVMEDIA_TYPE_VIDEO; } else { st->codec->codec_type = AVMEDIA_TYPE_AUDIO; } st->codec->codec_id = id; switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: { RawAudioDemuxerContext *s1 = s->priv_data; #if FF_API_FORMAT_PARAMETERS if (ap->sample_rate) st->codec->sample_rate = ap->sample_rate; if (ap->channels) st->codec->channels = ap->channels; else st->codec->channels = 1; #endif if (s1->sample_rate) st->codec->sample_rate = s1->sample_rate; if (s1->channels) st->codec->channels = s1->channels; st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); assert(st->codec->bits_per_coded_sample > 0); st->codec->block_align = st->codec->bits_per_coded_sample*st->codec->channels/8; av_set_pts_info(st, 64, 1, st->codec->sample_rate); break; } case AVMEDIA_TYPE_VIDEO: { FFRawVideoDemuxerContext *s1 = s->priv_data; int width = 0, height = 0, ret = 0; enum PixelFormat pix_fmt; AVRational framerate; if (s1->video_size && (ret = av_parse_video_size(&width, &height, s1->video_size)) < 0) { av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n"); goto fail; } if ((pix_fmt = av_get_pix_fmt(s1->pixel_format)) == PIX_FMT_NONE) { av_log(s, AV_LOG_ERROR, "No such pixel format: %s.\n", s1->pixel_format); ret = AVERROR(EINVAL); goto fail; } if ((ret = av_parse_video_rate(&framerate, s1->framerate)) < 0) { av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s1->framerate); goto fail; } #if FF_API_FORMAT_PARAMETERS if (ap->width > 0) width = ap->width; if (ap->height > 0) height = ap->height; if (ap->pix_fmt) pix_fmt = ap->pix_fmt; if (ap->time_base.num) framerate = (AVRational){ap->time_base.den, ap->time_base.num}; #endif av_set_pts_info(st, 64, framerate.den, framerate.num); st->codec->width = width; st->codec->height = height; st->codec->pix_fmt = pix_fmt; fail: av_freep(&s1->video_size); av_freep(&s1->pixel_format); av_freep(&s1->framerate); return ret; } default: return -1; } return 0; }
/** * Initialize the x11 grab device demuxer (public device demuxer API). * * @param s1 Context from avformat core * @return <ul> * <li>AVERROR(ENOMEM) no memory left</li> * <li>AVERROR(EIO) other failure case</li> * <li>0 success</li> * </ul> */ static int x11grab_read_header(AVFormatContext *s1) { X11GrabContext *x11grab = s1->priv_data; Display *dpy; AVStream *st = NULL; XImage *image; int x_off = 0, y_off = 0, ret = 0, screen, use_shm; char *param, *offset; AVRational framerate; param = av_strdup(s1->filename); if (!param) goto out; offset = strchr(param, '+'); if (offset) { sscanf(offset, "%d,%d", &x_off, &y_off); x11grab->draw_mouse = !strstr(offset, "nomouse"); *offset = 0; } ret = av_parse_video_size(&x11grab->width, &x11grab->height, x11grab->video_size); if (ret < 0) { av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n"); goto out; } ret = av_parse_video_rate(&framerate, x11grab->framerate); if (ret < 0) { av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate); goto out; } av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n", s1->filename, param, x_off, y_off, x11grab->width, x11grab->height); dpy = XOpenDisplay(param); if (!dpy) { av_log(s1, AV_LOG_ERROR, "Could not open X display.\n"); ret = AVERROR(EIO); goto out; } st = avformat_new_stream(s1, NULL); if (!st) { ret = AVERROR(ENOMEM); goto out; } avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */ screen = DefaultScreen(dpy); if (x11grab->follow_mouse) { int screen_w, screen_h; Window w; screen_w = DisplayWidth(dpy, screen); screen_h = DisplayHeight(dpy, screen); XQueryPointer(dpy, RootWindow(dpy, screen), &w, &w, &x_off, &y_off, &ret, &ret, &ret); x_off -= x11grab->width / 2; y_off -= x11grab->height / 2; x_off = FFMIN(FFMAX(x_off, 0), screen_w - x11grab->width); y_off = FFMIN(FFMAX(y_off, 0), screen_h - x11grab->height); av_log(s1, AV_LOG_INFO, "followmouse is enabled, resetting grabbing region to x: %d y: %d\n", x_off, y_off); } use_shm = XShmQueryExtension(dpy); av_log(s1, AV_LOG_INFO, "shared memory extension %sfound\n", use_shm ? "" : "not "); if (use_shm && setup_shm(s1, dpy, &image) < 0) { av_log(s1, AV_LOG_WARNING, "Falling back to XGetImage\n"); use_shm = 0; } if (!use_shm) { image = XGetImage(dpy, RootWindow(dpy, screen), x_off, y_off, x11grab->width, x11grab->height, AllPlanes, ZPixmap); } if (x11grab->draw_mouse && setup_mouse(dpy, screen) < 0) { av_log(s1, AV_LOG_WARNING, "XFixes not available, cannot draw the mouse cursor\n"); x11grab->draw_mouse = 0; } x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel / 8; x11grab->dpy = dpy; x11grab->time_base = (AVRational) { framerate.den, framerate.num }; x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base); x11grab->x_off = x_off; x11grab->y_off = y_off; x11grab->image = image; x11grab->use_shm = use_shm; ret = pixfmt_from_image(s1, image, &st->codec->pix_fmt); if (ret < 0) goto out; st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = AV_CODEC_ID_RAWVIDEO; st->codec->width = x11grab->width; st->codec->height = x11grab->height; st->codec->time_base = x11grab->time_base; st->codec->bit_rate = x11grab->frame_size * 1 / av_q2d(x11grab->time_base) * 8; out: av_free(param); return ret; }
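The grab demuxers above estimate the stream bit rate from the raw frame size and the frame rate (bit_rate = frame_size / av_q2d(time_base) * 8). A minimal numeric sketch of that arithmetic, with assumed values of 1024x768 at 25 fps and 32 bits per pixel (a typical ZPixmap depth):

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    int width = 1024, height = 768, bits_per_pixel = 32;
    AVRational framerate = { 25, 1 };
    /* the grabbers store the inverted frame rate as the time base */
    AVRational time_base = { framerate.den, framerate.num };

    int frame_size = width * height * bits_per_pixel / 8;   /* bytes per frame */

    /* 1 / av_q2d(time_base) recovers the frame rate in Hz */
    double bit_rate = frame_size * (1.0 / av_q2d(time_base)) * 8;

    printf("frame_size=%d bytes, bit_rate=%.0f bit/s\n", frame_size, bit_rate);
    return 0;
}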
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd, const char **p, FFServerStream **pstream) { char arg[1024], arg2[1024]; FFServerStream *stream; int val; av_assert0(pstream); stream = *pstream; if (!av_strcasecmp(cmd, "<Stream")) { char *q; FFServerStream *s; stream = av_mallocz(sizeof(FFServerStream)); if (!stream) return AVERROR(ENOMEM); config->dummy_actx = avcodec_alloc_context3(NULL); config->dummy_vctx = avcodec_alloc_context3(NULL); if (!config->dummy_vctx || !config->dummy_actx) { av_free(stream); avcodec_free_context(&config->dummy_vctx); avcodec_free_context(&config->dummy_actx); return AVERROR(ENOMEM); } config->dummy_actx->codec_type = AVMEDIA_TYPE_AUDIO; config->dummy_vctx->codec_type = AVMEDIA_TYPE_VIDEO; ffserver_get_arg(stream->filename, sizeof(stream->filename), p); q = strrchr(stream->filename, '>'); if (q) *q = '\0'; for (s = config->first_stream; s; s = s->next) { if (!strcmp(stream->filename, s->filename)) ERROR("Stream '%s' already registered\n", s->filename); } stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL); if (stream->fmt) { config->guessed_audio_codec_id = stream->fmt->audio_codec; config->guessed_video_codec_id = stream->fmt->video_codec; } else { config->guessed_audio_codec_id = AV_CODEC_ID_NONE; config->guessed_video_codec_id = AV_CODEC_ID_NONE; } config->stream_use_defaults = config->use_defaults; *pstream = stream; return 0; } av_assert0(stream); if (!av_strcasecmp(cmd, "Feed")) { FFServerStream *sfeed; ffserver_get_arg(arg, sizeof(arg), p); sfeed = config->first_feed; while (sfeed) { if (!strcmp(sfeed->filename, arg)) break; sfeed = sfeed->next_feed; } if (!sfeed) ERROR("Feed with name '%s' for stream '%s' is not defined\n", arg, stream->filename); else stream->feed = sfeed; } else if (!av_strcasecmp(cmd, "Format")) { ffserver_get_arg(arg, sizeof(arg), p); if (!strcmp(arg, "status")) { stream->stream_type = STREAM_TYPE_STATUS; stream->fmt = NULL; } else { stream->stream_type = STREAM_TYPE_LIVE; /* JPEG cannot be used here, so use single frame MJPEG */ if (!strcmp(arg, "jpeg")) { strcpy(arg, "singlejpeg"); stream->single_frame=1; } stream->fmt = ffserver_guess_format(arg, NULL, NULL); if (!stream->fmt) ERROR("Unknown Format: '%s'\n", arg); } if (stream->fmt) { config->guessed_audio_codec_id = stream->fmt->audio_codec; config->guessed_video_codec_id = stream->fmt->video_codec; } } else if (!av_strcasecmp(cmd, "InputFormat")) { ffserver_get_arg(arg, sizeof(arg), p); stream->ifmt = av_find_input_format(arg); if (!stream->ifmt) ERROR("Unknown input format: '%s'\n", arg); } else if (!av_strcasecmp(cmd, "FaviconURL")) { if (stream->stream_type == STREAM_TYPE_STATUS) ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); else ERROR("FaviconURL only permitted for status streams\n"); } else if (!av_strcasecmp(cmd, "Author") || !av_strcasecmp(cmd, "Comment") || !av_strcasecmp(cmd, "Copyright") || !av_strcasecmp(cmd, "Title")) { char key[32]; int i; ffserver_get_arg(arg, sizeof(arg), p); for (i = 0; i < strlen(cmd); i++) key[i] = av_tolower(cmd[i]); key[i] = 0; WARNING("Deprecated '%s' option in configuration file. 
Use " "'Metadata %s VALUE' instead.\n", cmd, key); if (av_dict_set(&stream->metadata, key, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Metadata")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (av_dict_set(&stream->metadata, arg, arg2, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Preroll")) { ffserver_get_arg(arg, sizeof(arg), p); stream->prebuffer = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "StartSendOnKey")) { stream->send_on_key = 1; } else if (!av_strcasecmp(cmd, "AudioCodec")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_codec(config->dummy_actx, arg, config); } else if (!av_strcasecmp(cmd, "VideoCodec")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_codec(config->dummy_vctx, arg, config); } else if (!av_strcasecmp(cmd, "MaxTime")) { ffserver_get_arg(arg, sizeof(arg), p); stream->max_time = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "AudioBitRate")) { float f; ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_float_param(&f, arg, 1000, -FLT_MAX, FLT_MAX, config, "Invalid %s: '%s'\n", cmd, arg); if (ffserver_save_avoption_int("b", (int64_t)lrintf(f), AV_OPT_FLAG_AUDIO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AudioChannels")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("ac", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AudioSampleRate")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("ar", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRateRange")) { int minrate, maxrate; char *dash; ffserver_get_arg(arg, sizeof(arg), p); dash = strchr(arg, '-'); if (dash) { *dash = '\0'; dash++; if (ffserver_set_int_param(&minrate, arg, 1000, 0, INT_MAX, config, "Invalid %s: '%s'", cmd, arg) >= 0 && ffserver_set_int_param(&maxrate, dash, 1000, 0, INT_MAX, config, "Invalid %s: '%s'", cmd, arg) >= 0) { if (ffserver_save_avoption_int("minrate", minrate, AV_OPT_FLAG_VIDEO_PARAM, config) < 0 || ffserver_save_avoption_int("maxrate", maxrate, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } } else ERROR("Incorrect format for VideoBitRateRange. 
It should be " "<min>-<max>: '%s'.\n", arg); } else if (!av_strcasecmp(cmd, "Debug")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("debug", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0 || ffserver_save_avoption("debug", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Strict")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("strict", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0 || ffserver_save_avoption("strict", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBufferSize")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 8*1024, 0, INT_MAX, config, "Invalid %s: '%s'", cmd, arg); if (ffserver_save_avoption_int("bufsize", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 1000, INT_MIN, INT_MAX, config, "Invalid %s: '%s'", cmd, arg); if (ffserver_save_avoption_int("bt", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRate")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 1000, INT_MIN, INT_MAX, config, "Invalid %s: '%s'", cmd, arg); if (ffserver_save_avoption_int("b", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoSize")) { int ret, w, h; ffserver_get_arg(arg, sizeof(arg), p); ret = av_parse_video_size(&w, &h, arg); if (ret < 0) ERROR("Invalid video size '%s'\n", arg); else { if (w % 2 || h % 2) WARNING("Image size is not a multiple of 2\n"); if (ffserver_save_avoption("video_size", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } } else if (!av_strcasecmp(cmd, "VideoFrameRate")) { ffserver_get_arg(&arg[2], sizeof(arg) - 2, p); arg[0] = '1'; arg[1] = '/'; if (ffserver_save_avoption("time_base", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "PixelFormat")) { enum AVPixelFormat pix_fmt; ffserver_get_arg(arg, sizeof(arg), p); pix_fmt = av_get_pix_fmt(arg); if (pix_fmt == AV_PIX_FMT_NONE) ERROR("Unknown pixel format: '%s'\n", arg); else if (ffserver_save_avoption("pixel_format", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoGopSize")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("g", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoIntraOnly")) { if (ffserver_save_avoption("g", "1", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoHighQuality")) { if (ffserver_save_avoption("mbd", "+bits", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Video4MotionVector")) { if (ffserver_save_avoption("mbd", "+bits", AV_OPT_FLAG_VIDEO_PARAM, config) < 0 || //FIXME remove ffserver_save_avoption("flags", "+mv4", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AVOptionVideo") || !av_strcasecmp(cmd, "AVOptionAudio")) { int ret; ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (!av_strcasecmp(cmd, "AVOptionVideo")) ret = ffserver_save_avoption(arg, arg2, AV_OPT_FLAG_VIDEO_PARAM, config); else ret = ffserver_save_avoption(arg, arg2, AV_OPT_FLAG_AUDIO_PARAM, config); if (ret < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AVPresetVideo") || !av_strcasecmp(cmd, "AVPresetAudio")) { ffserver_get_arg(arg, sizeof(arg), p); if 
(!av_strcasecmp(cmd, "AVPresetVideo")) ffserver_opt_preset(arg, AV_OPT_FLAG_VIDEO_PARAM, config); else ffserver_opt_preset(arg, AV_OPT_FLAG_AUDIO_PARAM, config); } else if (!av_strcasecmp(cmd, "VideoTag")) { ffserver_get_arg(arg, sizeof(arg), p); if (strlen(arg) == 4 && ffserver_save_avoption_int("codec_tag", MKTAG(arg[0], arg[1], arg[2], arg[3]), AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "BitExact")) { if (ffserver_save_avoption("flags", "+bitexact", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "DctFastint")) { if (ffserver_save_avoption("dct", "fastint", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "IdctSimple")) { if (ffserver_save_avoption("idct", "simple", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Qscale")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config, "Invalid Qscale: '%s'\n", arg); if (ffserver_save_avoption("flags", "+qscale", AV_OPT_FLAG_VIDEO_PARAM, config) < 0 || ffserver_save_avoption_int("global_quality", FF_QP2LAMBDA * val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQDiff")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("qdiff", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQMax")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("qmax", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQMin")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("qmin", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "LumiMask")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("lumi_mask", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "DarkMask")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("dark_mask", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "NoVideo")) { config->no_video = 1; } else if (!av_strcasecmp(cmd, "NoAudio")) { config->no_audio = 1; } else if (!av_strcasecmp(cmd, "ACL")) { ffserver_parse_acl_row(stream, NULL, NULL, *p, config->filename, config->line_num); } else if (!av_strcasecmp(cmd, "DynamicACL")) { ffserver_get_arg(stream->dynamic_acl, sizeof(stream->dynamic_acl), p); } else if (!av_strcasecmp(cmd, "RTSPOption")) { ffserver_get_arg(arg, sizeof(arg), p); av_freep(&stream->rtsp_option); stream->rtsp_option = av_strdup(arg); } else if (!av_strcasecmp(cmd, "MulticastAddress")) { ffserver_get_arg(arg, sizeof(arg), p); if (resolve_host(&stream->multicast_ip, arg)) ERROR("Invalid host/IP address: '%s'\n", arg); stream->is_multicast = 1; stream->loop = 1; /* default is looping */ } else if (!av_strcasecmp(cmd, "MulticastPort")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, 1, 65535, config, "Invalid MulticastPort: '%s'\n", arg); stream->multicast_port = val; } else if (!av_strcasecmp(cmd, "MulticastTTL")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config, "Invalid MulticastTTL: '%s'\n", arg); stream->multicast_ttl = val; } else if (!av_strcasecmp(cmd, "NoLoop")) { stream->loop = 0; } else if (!av_strcasecmp(cmd, "</Stream>")) { config->stream_use_defaults &= 1; if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm")) { if 
(config->dummy_actx->codec_id == AV_CODEC_ID_NONE) config->dummy_actx->codec_id = config->guessed_audio_codec_id; if (!config->no_audio && config->dummy_actx->codec_id != AV_CODEC_ID_NONE) { AVCodecContext *audio_enc = avcodec_alloc_context3(avcodec_find_encoder(config->dummy_actx->codec_id)); add_codec(stream, audio_enc, config); } if (config->dummy_vctx->codec_id == AV_CODEC_ID_NONE) config->dummy_vctx->codec_id = config->guessed_video_codec_id; if (!config->no_video && config->dummy_vctx->codec_id != AV_CODEC_ID_NONE) { AVCodecContext *video_enc = avcodec_alloc_context3(avcodec_find_encoder(config->dummy_vctx->codec_id)); add_codec(stream, video_enc, config); } } av_dict_free(&config->video_opts); av_dict_free(&config->audio_opts); avcodec_free_context(&config->dummy_vctx); avcodec_free_context(&config->dummy_actx); *pstream = NULL; } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) { ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); } else if (!av_strcasecmp(cmd, "UseDefaults")) { if (config->stream_use_defaults > 1) WARNING("Multiple UseDefaults/NoDefaults entries.\n"); config->stream_use_defaults = 3; } else if (!av_strcasecmp(cmd, "NoDefaults")) { if (config->stream_use_defaults > 1) WARNING("Multiple UseDefaults/NoDefaults entries.\n"); config->stream_use_defaults = 2; } else { ERROR("Invalid entry '%s' inside <Stream></Stream>\n", cmd); } return 0; nomem: av_log(NULL, AV_LOG_ERROR, "Out of memory. Aborting.\n"); av_dict_free(&config->video_opts); av_dict_free(&config->audio_opts); avcodec_free_context(&config->dummy_vctx); avcodec_free_context(&config->dummy_actx); return AVERROR(ENOMEM); }
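The VideoSize handling in the config parser above parses the option string and only warns when a dimension is odd. Pulled out of the ffserver plumbing, the check reduces to roughly the following sketch (stream and config handling omitted):

#include <stdio.h>
#include <libavutil/parseutils.h>

/* Returns 0 if the string is a usable video size, a negative value otherwise. */
static int check_video_size(const char *arg)
{
    int w, h;

    if (av_parse_video_size(&w, &h, arg) < 0) {
        fprintf(stderr, "Invalid video size '%s'\n", arg);
        return -1;
    }
    if (w % 2 || h % 2)
        fprintf(stderr, "Warning: %dx%d is not a multiple of 2\n", w, h);
    return 0;
}

int main(void)
{
    check_video_size("720x576");
    check_video_size("721x576");   /* triggers the odd-dimension warning */
    check_video_size("bogus");     /* triggers the parse error */
    return 0;
}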
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd, const char **p, int line_num, FFServerStream **pstream) { char arg[1024], arg2[1024]; FFServerStream *stream; int val; av_assert0(pstream); stream = *pstream; if (!av_strcasecmp(cmd, "<Stream")) { char *q; FFServerStream *s; stream = av_mallocz(sizeof(FFServerStream)); if (!stream) return AVERROR(ENOMEM); config->dummy_ctx = avcodec_alloc_context3(NULL); if (!config->dummy_ctx) { av_free(stream); return AVERROR(ENOMEM); } ffserver_get_arg(stream->filename, sizeof(stream->filename), p); q = strrchr(stream->filename, '>'); if (q) *q = '\0'; for (s = config->first_stream; s; s = s->next) { if (!strcmp(stream->filename, s->filename)) ERROR("Stream '%s' already registered\n", s->filename); } stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL); if (stream->fmt) { config->audio_id = stream->fmt->audio_codec; config->video_id = stream->fmt->video_codec; } else { config->audio_id = AV_CODEC_ID_NONE; config->video_id = AV_CODEC_ID_NONE; } *pstream = stream; return 0; } av_assert0(stream); if (!av_strcasecmp(cmd, "Feed")) { FFServerStream *sfeed; ffserver_get_arg(arg, sizeof(arg), p); sfeed = config->first_feed; while (sfeed) { if (!strcmp(sfeed->filename, arg)) break; sfeed = sfeed->next_feed; } if (!sfeed) ERROR("Feed with name '%s' for stream '%s' is not defined\n", arg, stream->filename); else stream->feed = sfeed; } else if (!av_strcasecmp(cmd, "Format")) { ffserver_get_arg(arg, sizeof(arg), p); if (!strcmp(arg, "status")) { stream->stream_type = STREAM_TYPE_STATUS; stream->fmt = NULL; } else { stream->stream_type = STREAM_TYPE_LIVE; /* JPEG cannot be used here, so use single frame MJPEG */ if (!strcmp(arg, "jpeg")) strcpy(arg, "mjpeg"); stream->fmt = ffserver_guess_format(arg, NULL, NULL); if (!stream->fmt) ERROR("Unknown Format: %s\n", arg); } if (stream->fmt) { config->audio_id = stream->fmt->audio_codec; config->video_id = stream->fmt->video_codec; } } else if (!av_strcasecmp(cmd, "InputFormat")) { ffserver_get_arg(arg, sizeof(arg), p); stream->ifmt = av_find_input_format(arg); if (!stream->ifmt) ERROR("Unknown input format: %s\n", arg); } else if (!av_strcasecmp(cmd, "FaviconURL")) { if (stream->stream_type == STREAM_TYPE_STATUS) ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); else ERROR("FaviconURL only permitted for status streams\n"); } else if (!av_strcasecmp(cmd, "Author") || !av_strcasecmp(cmd, "Comment") || !av_strcasecmp(cmd, "Copyright") || !av_strcasecmp(cmd, "Title")) { char key[32]; int i; ffserver_get_arg(arg, sizeof(arg), p); for (i = 0; i < strlen(cmd); i++) key[i] = av_tolower(cmd[i]); key[i] = 0; WARNING("'%s' option in configuration file is deprecated, " "use 'Metadata %s VALUE' instead\n", cmd, key); if (av_dict_set(&stream->metadata, key, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Metadata")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (av_dict_set(&stream->metadata, arg, arg2, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Preroll")) { ffserver_get_arg(arg, sizeof(arg), p); stream->prebuffer = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "StartSendOnKey")) { stream->send_on_key = 1; } else if (!av_strcasecmp(cmd, "AudioCodec")) { ffserver_get_arg(arg, sizeof(arg), p); config->audio_id = opt_codec(arg, AVMEDIA_TYPE_AUDIO); if (config->audio_id == AV_CODEC_ID_NONE) ERROR("Unknown AudioCodec: %s\n", arg); } else if (!av_strcasecmp(cmd, "VideoCodec")) { ffserver_get_arg(arg, sizeof(arg), p); 
config->video_id = opt_codec(arg, AVMEDIA_TYPE_VIDEO); if (config->video_id == AV_CODEC_ID_NONE) ERROR("Unknown VideoCodec: %s\n", arg); } else if (!av_strcasecmp(cmd, "MaxTime")) { ffserver_get_arg(arg, sizeof(arg), p); stream->max_time = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "AudioBitRate")) { float f; ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_float_param(&f, arg, 1000, 0, FLT_MAX, config, line_num, "Invalid %s: %s\n", cmd, arg); if (av_dict_set_int(&config->audio_conf, cmd, lrintf(f), 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AudioChannels")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 1, 8, config, line_num, "Invalid %s: %s, valid range is 1-8.", cmd, arg); if (av_dict_set(&config->audio_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AudioSampleRate")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 0, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->audio_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRateRange")) { int minrate, maxrate; ffserver_get_arg(arg, sizeof(arg), p); if (sscanf(arg, "%d-%d", &minrate, &maxrate) == 2) { if (av_dict_set_int(&config->video_conf, "VideoBitRateRangeMin", minrate, 0) < 0 || av_dict_set_int(&config->video_conf, "VideoBitRateRangeMax", maxrate, 0) < 0) goto nomem; } else ERROR("Incorrect format for VideoBitRateRange -- should be " "<min>-<max>: %s\n", arg); } else if (!av_strcasecmp(cmd, "Debug")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Strict")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBufferSize")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 8*1024, 0, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 1000, INT_MIN, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRate")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 1000, 0, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoSize")) { int ret, w, h; ffserver_get_arg(arg, sizeof(arg), p); ret = av_parse_video_size(&w, &h, arg); if (ret < 0) ERROR("Invalid video size '%s'\n", arg); else if ((w % 16) || (h % 16)) ERROR("Image size must be a multiple of 16\n"); if (av_dict_set_int(&config->video_conf, "VideoSizeWidth", w, 0) < 0 || av_dict_set_int(&config->video_conf, "VideoSizeHeight", h, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoFrameRate")) { AVRational frame_rate; ffserver_get_arg(arg, sizeof(arg), p); if (av_parse_video_rate(&frame_rate, arg) < 0) { ERROR("Incorrect frame rate: %s\n", arg); } else { if (av_dict_set_int(&config->video_conf, "VideoFrameRateNum", frame_rate.num, 0) < 0 || av_dict_set_int(&config->video_conf, 
"VideoFrameRateDen", frame_rate.den, 0) < 0) goto nomem; } } else if (!av_strcasecmp(cmd, "PixelFormat")) { enum AVPixelFormat pix_fmt; ffserver_get_arg(arg, sizeof(arg), p); pix_fmt = av_get_pix_fmt(arg); if (pix_fmt == AV_PIX_FMT_NONE) ERROR("Unknown pixel format: %s\n", arg); if (av_dict_set_int(&config->video_conf, cmd, pix_fmt, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoGopSize")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoIntraOnly")) { if (av_dict_set(&config->video_conf, cmd, "1", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoHighQuality")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Video4MotionVector")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AVOptionVideo") || !av_strcasecmp(cmd, "AVOptionAudio")) { int ret; ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (!av_strcasecmp(cmd, "AVOptionVideo")) ret = ffserver_save_avoption(arg, arg2, &config->video_opts, AV_OPT_FLAG_VIDEO_PARAM ,config, line_num); else ret = ffserver_save_avoption(arg, arg2, &config->audio_opts, AV_OPT_FLAG_AUDIO_PARAM ,config, line_num); if (ret < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AVPresetVideo") || !av_strcasecmp(cmd, "AVPresetAudio")) { char **preset = NULL; ffserver_get_arg(arg, sizeof(arg), p); if (!av_strcasecmp(cmd, "AVPresetVideo")) { preset = &config->video_preset; ffserver_opt_preset(arg, NULL, 0, NULL, &config->video_id); } else { preset = &config->audio_preset; ffserver_opt_preset(arg, NULL, 0, &config->audio_id, NULL); } *preset = av_strdup(arg); if (!preset) return AVERROR(ENOMEM); } else if (!av_strcasecmp(cmd, "VideoTag")) { ffserver_get_arg(arg, sizeof(arg), p); if (strlen(arg) == 4) { if (av_dict_set(&config->video_conf, "VideoTag", "arg", 0) < 0) goto nomem; } } else if (!av_strcasecmp(cmd, "BitExact")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "DctFastint")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "IdctSimple")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Qscale")) { ffserver_get_arg(arg, sizeof(arg), p); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQDiff")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num, "%s out of range\n", cmd); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQMax")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num, "%s out of range\n", cmd); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQMin")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num, "%s out of range\n", cmd); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "LumiMask")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_float_param(NULL, arg, 0, -FLT_MAX, FLT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } 
else if (!av_strcasecmp(cmd, "DarkMask")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_float_param(NULL, arg, 0, -FLT_MAX, FLT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "NoVideo")) { config->video_id = AV_CODEC_ID_NONE; } else if (!av_strcasecmp(cmd, "NoAudio")) { config->audio_id = AV_CODEC_ID_NONE; } else if (!av_strcasecmp(cmd, "ACL")) { ffserver_parse_acl_row(stream, NULL, NULL, *p, config->filename, line_num); } else if (!av_strcasecmp(cmd, "DynamicACL")) { ffserver_get_arg(stream->dynamic_acl, sizeof(stream->dynamic_acl), p); } else if (!av_strcasecmp(cmd, "RTSPOption")) { ffserver_get_arg(arg, sizeof(arg), p); av_freep(&stream->rtsp_option); stream->rtsp_option = av_strdup(arg); } else if (!av_strcasecmp(cmd, "MulticastAddress")) { ffserver_get_arg(arg, sizeof(arg), p); if (resolve_host(&stream->multicast_ip, arg)) ERROR("Invalid host/IP address: %s\n", arg); stream->is_multicast = 1; stream->loop = 1; /* default is looping */ } else if (!av_strcasecmp(cmd, "MulticastPort")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, 1, 65535, config, line_num, "Invalid MulticastPort: %s\n", arg); stream->multicast_port = val; } else if (!av_strcasecmp(cmd, "MulticastTTL")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config, line_num, "Invalid MulticastTTL: %s\n", arg); stream->multicast_ttl = val; } else if (!av_strcasecmp(cmd, "NoLoop")) { stream->loop = 0; } else if (!av_strcasecmp(cmd, "</Stream>")) { if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm")) { if (config->audio_id != AV_CODEC_ID_NONE) { AVCodecContext *audio_enc = avcodec_alloc_context3(avcodec_find_encoder(config->audio_id)); if (config->audio_preset && ffserver_opt_preset(arg, audio_enc, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_ENCODING_PARAM, NULL, NULL) < 0) ERROR("Could not apply preset '%s'\n", arg); ffserver_apply_stream_config(audio_enc, config->audio_conf, &config->audio_opts); add_codec(stream, audio_enc); } if (config->video_id != AV_CODEC_ID_NONE) { AVCodecContext *video_enc = avcodec_alloc_context3(avcodec_find_encoder(config->video_id)); if (config->video_preset && ffserver_opt_preset(arg, video_enc, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_ENCODING_PARAM, NULL, NULL) < 0) ERROR("Could not apply preset '%s'\n", arg); ffserver_apply_stream_config(video_enc, config->video_conf, &config->video_opts); add_codec(stream, video_enc); } } av_dict_free(&config->video_opts); av_dict_free(&config->video_conf); av_dict_free(&config->audio_opts); av_dict_free(&config->audio_conf); av_freep(&config->video_preset); av_freep(&config->audio_preset); avcodec_free_context(&config->dummy_ctx); *pstream = NULL; } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) { ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); } else { ERROR("Invalid entry '%s' inside <Stream></Stream>\n", cmd); } return 0; nomem: av_log(NULL, AV_LOG_ERROR, "Out of memory. Aborting.\n"); av_dict_free(&config->video_opts); av_dict_free(&config->video_conf); av_dict_free(&config->audio_opts); av_dict_free(&config->audio_conf); av_freep(&config->video_preset); av_freep(&config->audio_preset); avcodec_free_context(&config->dummy_ctx); return AVERROR(ENOMEM); }
static int img_read_header(AVFormatContext *s1)
{
    VideoDemuxData *s = s1->priv_data;
    int first_index, last_index, ret = 0;
    int width = 0, height = 0;
    AVStream *st;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;
    AVRational framerate;

    s1->ctx_flags |= AVFMTCTX_NOHEADER;

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    if (s->pixel_format &&
        (pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) {
        av_log(s1, AV_LOG_ERROR, "No such pixel format: %s.\n", s->pixel_format);
        return AVERROR(EINVAL);
    }
    if (s->video_size &&
        (ret = av_parse_video_size(&width, &height, s->video_size)) < 0) {
        av_log(s, AV_LOG_ERROR, "Could not parse video size: %s.\n", s->video_size);
        return ret;
    }
    if ((ret = av_parse_video_rate(&framerate, s->framerate)) < 0) {
        av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s->framerate);
        return ret;
    }

    av_strlcpy(s->path, s1->filename, sizeof(s->path));
    s->img_number = 0;
    s->img_count  = 0;

    /* find format */
    if (s1->iformat->flags & AVFMT_NOFILE)
        s->is_pipe = 0;
    else {
        s->is_pipe       = 1;
        st->need_parsing = AVSTREAM_PARSE_FULL;
    }

    avpriv_set_pts_info(st, 60, framerate.den, framerate.num);

    if (width && height) {
        st->codec->width  = width;
        st->codec->height = height;
    }

    if (!s->is_pipe) {
        if (find_image_range(&first_index, &last_index, s->path,
                             FFMAX(s->start_number, 5)) < 0)
            return AVERROR(ENOENT);
        s->img_first  = first_index;
        s->img_last   = last_index;
        s->img_number = s->start_number != 1 ? s->start_number : first_index;
        /* compute duration */
        st->start_time = 0;
        st->duration   = last_index - first_index + 1;
    }

    if (s1->video_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = s1->video_codec_id;
    } else if (s1->audio_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = s1->audio_codec_id;
    } else {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = ff_guess_image2_codec(s->path);
    }

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && pix_fmt != AV_PIX_FMT_NONE)
        st->codec->pix_fmt = pix_fmt;

    return 0;
}
/* raw input */
int ff_raw_read_header(AVFormatContext *s)
{
    AVStream *st;
    enum AVCodecID id;

    st = avformat_new_stream(s, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    id = s->iformat->raw_codec_id;
    if (id == AV_CODEC_ID_RAWVIDEO) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    } else {
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
    }
    st->codec->codec_id = id;

    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO: {
        RawAudioDemuxerContext *s1 = s->priv_data;

        st->codec->channels = 1;

        if (id == AV_CODEC_ID_ADPCM_G722)
            st->codec->sample_rate = 16000;

        if (s1 && s1->sample_rate)
            st->codec->sample_rate = s1->sample_rate;
        if (s1 && s1->channels)
            st->codec->channels = s1->channels;

        st->codec->bits_per_coded_sample =
            av_get_bits_per_sample(st->codec->codec_id);
        assert(st->codec->bits_per_coded_sample > 0);
        st->codec->block_align =
            st->codec->bits_per_coded_sample * st->codec->channels / 8;
        avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate);
        break;
    }
    case AVMEDIA_TYPE_VIDEO: {
        FFRawVideoDemuxerContext *s1 = s->priv_data;
        int width = 0, height = 0, ret = 0;
        enum AVPixelFormat pix_fmt;
        AVRational framerate;

        if (s1->video_size &&
            (ret = av_parse_video_size(&width, &height, s1->video_size)) < 0) {
            av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
            goto fail;
        }
        if ((pix_fmt = av_get_pix_fmt(s1->pixel_format)) == AV_PIX_FMT_NONE) {
            av_log(s, AV_LOG_ERROR, "No such pixel format: %s.\n",
                   s1->pixel_format);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if ((ret = av_parse_video_rate(&framerate, s1->framerate)) < 0) {
            av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n",
                   s1->framerate);
            goto fail;
        }
        avpriv_set_pts_info(st, 64, framerate.den, framerate.num);
        st->codec->width   = width;
        st->codec->height  = height;
        st->codec->pix_fmt = pix_fmt;
    fail:
        return ret;
    }
    default:
        return -1;
    }
    return 0;
}
int main(int argc, char **argv)
{
    uint8_t *src_data[4], *dst_data[4];
    int src_linesize[4], dst_linesize[4];
    int src_w = 320, src_h = 240, dst_w, dst_h;
    enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
    const char *dst_size = NULL;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    struct SwsContext *sws_ctx;
    int i, ret;

    if (argc != 3) {
        fprintf(stderr, "Usage: %s output_file output_size\n"
                "API example program to show how to scale an image with libswscale.\n"
                "This program generates a series of pictures, rescales them to the given "
                "output_size and saves them to an output file named output_file.\n"
                "\n", argv[0]);
        exit(1);
    }
    dst_filename = argv[1];
    dst_size     = argv[2];

    if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
        fprintf(stderr,
                "Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
                dst_size);
        exit(1);
    }

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create scaling context */
    sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
                             dst_w, dst_h, dst_pix_fmt,
                             SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws_ctx) {
        fprintf(stderr,
                "Impossible to create scale context for the conversion "
                "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
                av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
        ret = AVERROR(EINVAL);
        goto end;
    }

    /* allocate source and destination image buffers */
    if ((ret = av_image_alloc(src_data, src_linesize,
                              src_w, src_h, src_pix_fmt, 16)) < 0) {
        fprintf(stderr, "Could not allocate source image\n");
        goto end;
    }

    /* buffer is going to be written to rawvideo file, no alignment */
    if ((ret = av_image_alloc(dst_data, dst_linesize,
                              dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
        fprintf(stderr, "Could not allocate destination image\n");
        goto end;
    }
    dst_bufsize = ret;

    for (i = 0; i < 100; i++) {
        /* generate synthetic video */
        fill_yuv_image(src_data, src_linesize, src_w, src_h, i);

        /* convert to destination format */
        sws_scale(sws_ctx, (const uint8_t * const *)src_data,
                  src_linesize, 0, src_h, dst_data, dst_linesize);

        /* write scaled image to file */
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    }

    fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
            "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
            av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);

end:
    if (dst_file)
        fclose(dst_file);
    av_freep(&src_data[0]);
    av_freep(&dst_data[0]);
    sws_freeContext(sws_ctx);
    return ret < 0;
}
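All of the readers above funnel user-supplied strings through the same two libavutil helpers. The following standalone sketch, with arbitrary example input strings that are not taken from any of the functions in this collection, shows the calling convention of av_parse_video_size() and av_parse_video_rate() in isolation:

/* Minimal sketch of the libavutil parsing helpers used throughout these
 * snippets; the literals "hd720" and "30000/1001" are illustrative only. */
#include <stdio.h>
#include <libavutil/parseutils.h>
#include <libavutil/rational.h>

int main(void)
{
    int w = 0, h = 0;
    AVRational rate;

    /* Accepts "WxH" or a size abbreviation such as "vga", "qntsc", "hd720". */
    if (av_parse_video_size(&w, &h, "hd720") < 0) {
        fprintf(stderr, "Invalid frame size\n");
        return 1;
    }

    /* Accepts "num/den", a decimal number, or an abbreviation such as "ntsc". */
    if (av_parse_video_rate(&rate, "30000/1001") < 0) {
        fprintf(stderr, "Invalid frame rate\n");
        return 1;
    }

    /* The demuxers and sources above store the inverse of the rate as the
     * stream time base. */
    printf("%dx%d @ %d/%d fps (time base %d/%d)\n",
           w, h, rate.num, rate.den, rate.den, rate.num);
    return 0;
}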
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
                                        const char **p, int line_num,
                                        FFServerStream **pstream)
{
    char arg[1024], arg2[1024];
    FFServerStream *stream;

    av_assert0(pstream);
    stream = *pstream;

    if (!av_strcasecmp(cmd, "<Stream")) {
        char *q;
        FFServerStream *s;
        stream = av_mallocz(sizeof(FFServerStream));
        if (!stream)
            return AVERROR(ENOMEM);
        ffserver_get_arg(stream->filename, sizeof(stream->filename), p);
        q = strrchr(stream->filename, '>');
        if (q)
            *q = '\0';

        for (s = config->first_stream; s; s = s->next) {
            if (!strcmp(stream->filename, s->filename))
                ERROR("Stream '%s' already registered\n", s->filename);
        }

        stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL);
        avcodec_get_context_defaults3(&config->video_enc, NULL);
        avcodec_get_context_defaults3(&config->audio_enc, NULL);

        config->audio_id = AV_CODEC_ID_NONE;
        config->video_id = AV_CODEC_ID_NONE;
        if (stream->fmt) {
            config->audio_id = stream->fmt->audio_codec;
            config->video_id = stream->fmt->video_codec;
        }
        *pstream = stream;
        return 0;
    }
    av_assert0(stream);

    if (!av_strcasecmp(cmd, "Feed")) {
        FFServerStream *sfeed;
        ffserver_get_arg(arg, sizeof(arg), p);
        sfeed = config->first_feed;
        while (sfeed) {
            if (!strcmp(sfeed->filename, arg))
                break;
            sfeed = sfeed->next_feed;
        }
        if (!sfeed)
            ERROR("Feed with name '%s' for stream '%s' is not defined\n", arg,
                  stream->filename);
        else
            stream->feed = sfeed;
    } else if (!av_strcasecmp(cmd, "Format")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (!strcmp(arg, "status")) {
            stream->stream_type = STREAM_TYPE_STATUS;
            stream->fmt = NULL;
        } else {
            stream->stream_type = STREAM_TYPE_LIVE;
            /* JPEG cannot be used here, so use single frame MJPEG */
            if (!strcmp(arg, "jpeg"))
                strcpy(arg, "mjpeg");
            stream->fmt = ffserver_guess_format(arg, NULL, NULL);
            if (!stream->fmt)
                ERROR("Unknown Format: %s\n", arg);
        }
        if (stream->fmt) {
            config->audio_id = stream->fmt->audio_codec;
            config->video_id = stream->fmt->video_codec;
        }
    } else if (!av_strcasecmp(cmd, "InputFormat")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->ifmt = av_find_input_format(arg);
        if (!stream->ifmt)
            ERROR("Unknown input format: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "FaviconURL")) {
        if (stream->stream_type == STREAM_TYPE_STATUS)
            ffserver_get_arg(stream->feed_filename,
                             sizeof(stream->feed_filename), p);
        else
            ERROR("FaviconURL only permitted for status streams\n");
    } else if (!av_strcasecmp(cmd, "Author")    ||
               !av_strcasecmp(cmd, "Comment")   ||
               !av_strcasecmp(cmd, "Copyright") ||
               !av_strcasecmp(cmd, "Title")) {
        char key[32];
        int i, ret;
        ffserver_get_arg(arg, sizeof(arg), p);
        for (i = 0; i < strlen(cmd); i++)
            key[i] = av_tolower(cmd[i]);
        key[i] = 0;
        WARNING("'%s' option in configuration file is deprecated, "
                "use 'Metadata %s VALUE' instead\n", cmd, key);
        if ((ret = av_dict_set(&stream->metadata, key, arg, 0)) < 0)
            ERROR("Could not set metadata '%s' to value '%s': %s\n",
                  key, arg, av_err2str(ret));
    } else if (!av_strcasecmp(cmd, "Metadata")) {
        int ret;
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_get_arg(arg2, sizeof(arg2), p);
        if ((ret = av_dict_set(&stream->metadata, arg, arg2, 0)) < 0) {
            ERROR("Could not set metadata '%s' to value '%s': %s\n",
                  arg, arg2, av_err2str(ret));
        }
    } else if (!av_strcasecmp(cmd, "Preroll")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->prebuffer = atof(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "StartSendOnKey")) {
        stream->send_on_key = 1;
    } else if (!av_strcasecmp(cmd, "AudioCodec")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_id = opt_codec(arg, AVMEDIA_TYPE_AUDIO);
        if (config->audio_id == AV_CODEC_ID_NONE)
            ERROR("Unknown AudioCodec: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "VideoCodec")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_id = opt_codec(arg, AVMEDIA_TYPE_VIDEO);
        if (config->video_id == AV_CODEC_ID_NONE)
            ERROR("Unknown VideoCodec: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "MaxTime")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->max_time = atof(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "AudioBitRate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_enc.bit_rate = lrintf(atof(arg) * 1000);
    } else if (!av_strcasecmp(cmd, "AudioChannels")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_enc.channels = atoi(arg);
    } else if (!av_strcasecmp(cmd, "AudioSampleRate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_enc.sample_rate = atoi(arg);
    } else if (!av_strcasecmp(cmd, "VideoBitRateRange")) {
        int minrate, maxrate;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (sscanf(arg, "%d-%d", &minrate, &maxrate) == 2) {
            config->video_enc.rc_min_rate = minrate * 1000;
            config->video_enc.rc_max_rate = maxrate * 1000;
        } else
            ERROR("Incorrect format for VideoBitRateRange -- should be "
                  "<min>-<max>: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "Debug")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.debug = strtol(arg, 0, 0);
    } else if (!av_strcasecmp(cmd, "Strict")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.strict_std_compliance = atoi(arg);
    } else if (!av_strcasecmp(cmd, "VideoBufferSize")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.rc_buffer_size = atoi(arg) * 8 * 1024;
    } else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.bit_rate_tolerance = atoi(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "VideoBitRate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.bit_rate = atoi(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "VideoSize")) {
        int ret;
        ffserver_get_arg(arg, sizeof(arg), p);
        ret = av_parse_video_size(&config->video_enc.width,
                                  &config->video_enc.height, arg);
        if (ret < 0)
            ERROR("Invalid video size '%s'\n", arg);
        else if ((config->video_enc.width % 16) != 0 ||
                 (config->video_enc.height % 16) != 0)
            ERROR("Image size must be a multiple of 16\n");
    } else if (!av_strcasecmp(cmd, "VideoFrameRate")) {
        AVRational frame_rate;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (av_parse_video_rate(&frame_rate, arg) < 0) {
            ERROR("Incorrect frame rate: %s\n", arg);
        } else {
            config->video_enc.time_base.num = frame_rate.den;
            config->video_enc.time_base.den = frame_rate.num;
        }
    } else if (!av_strcasecmp(cmd, "PixelFormat")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.pix_fmt = av_get_pix_fmt(arg);
        if (config->video_enc.pix_fmt == AV_PIX_FMT_NONE)
            ERROR("Unknown pixel format: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "VideoGopSize")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.gop_size = atoi(arg);
    } else if (!av_strcasecmp(cmd, "VideoIntraOnly")) {
        config->video_enc.gop_size = 1;
    } else if (!av_strcasecmp(cmd, "VideoHighQuality")) {
        config->video_enc.mb_decision = FF_MB_DECISION_BITS;
    } else if (!av_strcasecmp(cmd, "Video4MotionVector")) {
        config->video_enc.mb_decision = FF_MB_DECISION_BITS; // FIXME remove
        config->video_enc.flags |= CODEC_FLAG_4MV;
    } else if (!av_strcasecmp(cmd, "AVOptionVideo") ||
               !av_strcasecmp(cmd, "AVOptionAudio")) {
        AVCodecContext *avctx;
        int type;
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_get_arg(arg2, sizeof(arg2), p);
        if (!av_strcasecmp(cmd, "AVOptionVideo")) {
            avctx = &config->video_enc;
            type = AV_OPT_FLAG_VIDEO_PARAM;
        } else {
            avctx = &config->audio_enc;
            type = AV_OPT_FLAG_AUDIO_PARAM;
        }
        if (ffserver_opt_default(arg, arg2, avctx, type | AV_OPT_FLAG_ENCODING_PARAM)) {
            ERROR("Error setting %s option to %s %s\n", cmd, arg, arg2);
        }
    } else if (!av_strcasecmp(cmd, "AVPresetVideo") ||
               !av_strcasecmp(cmd, "AVPresetAudio")) {
        AVCodecContext *avctx;
        int type;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (!av_strcasecmp(cmd, "AVPresetVideo")) {
            avctx = &config->video_enc;
            config->video_enc.codec_id = config->video_id;
            type = AV_OPT_FLAG_VIDEO_PARAM;
        } else {
            avctx = &config->audio_enc;
            config->audio_enc.codec_id = config->audio_id;
            type = AV_OPT_FLAG_AUDIO_PARAM;
        }
        if (ffserver_opt_preset(arg, avctx, type | AV_OPT_FLAG_ENCODING_PARAM,
                                &config->audio_id, &config->video_id)) {
            ERROR("AVPreset error: %s\n", arg);
        }
    } else if (!av_strcasecmp(cmd, "VideoTag")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (strlen(arg) == 4)
            config->video_enc.codec_tag = MKTAG(arg[0], arg[1], arg[2], arg[3]);
    } else if (!av_strcasecmp(cmd, "BitExact")) {
        config->video_enc.flags |= CODEC_FLAG_BITEXACT;
    } else if (!av_strcasecmp(cmd, "DctFastint")) {
        config->video_enc.dct_algo = FF_DCT_FASTINT;
    } else if (!av_strcasecmp(cmd, "IdctSimple")) {
        config->video_enc.idct_algo = FF_IDCT_SIMPLE;
    } else if (!av_strcasecmp(cmd, "Qscale")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.flags |= CODEC_FLAG_QSCALE;
        config->video_enc.global_quality = FF_QP2LAMBDA * atoi(arg);
    } else if (!av_strcasecmp(cmd, "VideoQDiff")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.max_qdiff = atoi(arg);
        if (config->video_enc.max_qdiff < 1 || config->video_enc.max_qdiff > 31)
            ERROR("VideoQDiff out of range\n");
    } else if (!av_strcasecmp(cmd, "VideoQMax")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.qmax = atoi(arg);
        if (config->video_enc.qmax < 1 || config->video_enc.qmax > 31)
            ERROR("VideoQMax out of range\n");
    } else if (!av_strcasecmp(cmd, "VideoQMin")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.qmin = atoi(arg);
        if (config->video_enc.qmin < 1 || config->video_enc.qmin > 31)
            ERROR("VideoQMin out of range\n");
    } else if (!av_strcasecmp(cmd, "LumiMask")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.lumi_masking = atof(arg);
    } else if (!av_strcasecmp(cmd, "DarkMask")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.dark_masking = atof(arg);
    } else if (!av_strcasecmp(cmd, "NoVideo")) {
        config->video_id = AV_CODEC_ID_NONE;
    } else if (!av_strcasecmp(cmd, "NoAudio")) {
        config->audio_id = AV_CODEC_ID_NONE;
    } else if (!av_strcasecmp(cmd, "ACL")) {
        ffserver_parse_acl_row(stream, NULL, NULL, *p, config->filename, line_num);
    } else if (!av_strcasecmp(cmd, "DynamicACL")) {
        ffserver_get_arg(stream->dynamic_acl, sizeof(stream->dynamic_acl), p);
    } else if (!av_strcasecmp(cmd, "RTSPOption")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        av_freep(&stream->rtsp_option);
        stream->rtsp_option = av_strdup(arg);
    } else if (!av_strcasecmp(cmd, "MulticastAddress")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (resolve_host(&stream->multicast_ip, arg) != 0)
            ERROR("Invalid host/IP address: %s\n", arg);
        stream->is_multicast = 1;
        stream->loop = 1; /* default is looping */
    } else if (!av_strcasecmp(cmd, "MulticastPort")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->multicast_port = atoi(arg);
    } else if (!av_strcasecmp(cmd, "MulticastTTL")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->multicast_ttl = atoi(arg);
    } else if (!av_strcasecmp(cmd, "NoLoop")) {
        stream->loop = 0;
} else if (!av_strcasecmp(cmd, "</Stream>")) { if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) { if (config->audio_id != AV_CODEC_ID_NONE) { config->audio_enc.codec_type = AVMEDIA_TYPE_AUDIO; config->audio_enc.codec_id = config->audio_id; add_codec(stream, &config->audio_enc); } if (config->video_id != AV_CODEC_ID_NONE) { config->video_enc.codec_type = AVMEDIA_TYPE_VIDEO; config->video_enc.codec_id = config->video_id; add_codec(stream, &config->video_enc); } } *pstream = NULL; } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) { ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); } else { ERROR("Invalid entry '%s' inside <Stream></Stream>\n", cmd); } return 0; }