예제 #1
0
void ffmpeg_logger(void *, int level, const char *format, va_list va_args) {
	// We're still called even if the level doesn't match.
	if (level > av_log_get_level())
		return;

	// Format into a bounded buffer and force NUL termination (vsnprintf
	// truncates, but some pre-C99 implementations don't terminate).
	char tmp[1024];
	vsnprintf(tmp, sizeof(tmp), format, va_args);
	tmp[sizeof(tmp) - 1] = '\0';

	// Strip off any trailing newline.  Guard against an empty message:
	// the original indexed tmp[len - 1] unconditionally, which reads
	// tmp[SIZE_MAX] when len == 0.
	size_t len = strlen(tmp);
	if (len > 0 && tmp[len - 1] == '\n')
		tmp[len - 1] = '\0';

	// Report a marker message emitted by the Atrac3+ decoder.
	if (!strcmp(tmp, "GHA Phase shifting")) {
		Reporting::ReportMessage("Atrac3+: GHA phase shifting");
	}

	// Let's color the log line appropriately.
	if (level <= AV_LOG_PANIC) {
		ERROR_LOG(ME, "FF: %s", tmp);
	} else if (level >= AV_LOG_VERBOSE) {
		DEBUG_LOG(ME, "FF: %s", tmp);
	} else {
		INFO_LOG(ME, "FF: %s", tmp);
	}
}
예제 #2
0
파일: ffms.cpp 프로젝트: 1974kpkpkp/ffms2
void av_log_windebug_callback(void* ptr, int level, const char* fmt, va_list vl) {
	static int print_prefix=1;
	static int count;
	static char line[1024] = {0}, prev[1024] = {0};
	AVClass* avc = ptr ? *(AVClass**)ptr : NULL;
	if(level > av_log_get_level())
		return;

	// Prepend the "[module @ ptr]" context prefix at the start of each line.
	int written = 0;
	if(print_prefix && avc) {
		written = snprintf(line, sizeof(line), "[%s @ %p]", avc->item_name(ptr), ptr);
		// snprintf returns the would-be (untruncated) length, or < 0 on
		// error; clamp so `written` is always a valid offset into `line`.
		if(written < 0 || written >= (int)sizeof(line))
			written = (int)sizeof(line) - 1;
	}

	int ret = vsnprintf(line + written, sizeof(line) - written, fmt, vl);
	if(ret < 0)
		return;
	written += ret;
	// vsnprintf also reports the untruncated length; the original used
	// `written - 1` as an index without clamping, reading out of bounds
	// whenever the message was truncated (and line[-1] when written == 0).
	if(written >= (int)sizeof(line))
		written = (int)sizeof(line) - 1;
	line[sizeof(line) - 1] = 0;

	// A trailing newline means the next call starts a fresh line.
	print_prefix = written > 0 && line[written-1] == '\n';
	// Collapse consecutive identical complete lines into a repeat count.
	if(print_prefix && !strcmp(line, prev)){
		count++;
		return;
	}
	if(count > 0){
		std::stringstream ss;
		ss << "    Last message repeated " << count << " times\n";
		OutputDebugStringA(ss.str().c_str());
		count = 0;
	}
	OutputDebugStringA(line);
	strcpy(prev, line);
}
예제 #3
0
/**
 * call back function to be registered for avlog
 */
static void log_cb(void* ptr, int level, const char* fmt, va_list vl)
{
	if (level > av_log_get_level())
		return;

	const char*name = NULL;
	FILE *flog;

	if (ccx_options.messages_target==CCX_MESSAGES_STDOUT)
		flog = stdout;
	else
		flog = stderr;

        if (level == AV_LOG_PANIC)
                fprintf(flog, "[panic][%s] ", name);
        else if (level == AV_LOG_FATAL)
                fprintf(flog, "[fatal][%s] ", name);
        else if (level == AV_LOG_ERROR)
                fprintf(flog, "[error][%s] ", name);
        else if (level == AV_LOG_WARNING)
                fprintf(flog, "[warning][%s] ", name);
        else if (level == AV_LOG_INFO)
                fprintf(flog, "[info][%s] ", name);
        else if (level == AV_LOG_DEBUG)
                fprintf(flog, "[debug][%s] ", name);

	vfprintf(flog, fmt, vl);
}
예제 #4
0
static void ffmpeg_log_cb(void* ptr, int level, const char* fmt, va_list vl)
{
    const char *LOG_SENDER = "ffmpeg";
    enum { LOG_LEVEL = 5 };
    char buf[100];
    int bufsize = sizeof(buf), len;
    pj_str_t fmt_st;

    /* Custom callback needs to filter log level by itself */
    if (level > av_log_get_level())
	return;

    /* Add original ffmpeg sender to log format */
    if (ptr) {
	AVClass* avc = *(AVClass**)ptr;
	len = pj_ansi_snprintf(buf, bufsize, "%s: ", avc->item_name(ptr));
	/* snprintf-style functions report the untruncated length (or a
	 * negative value on error); clamp so `bufsize` can never go
	 * negative and underflow the size arithmetic below. */
	if (len < 0)
	    len = 0;
	else if (len >= bufsize)
	    len = bufsize - 1;
	bufsize -= len;
    }

    /* Copy original log format, truncated to the remaining space */
    len = pj_ansi_strlen(fmt);
    if (len > bufsize-1)
	len = bufsize-1;
    pj_memcpy(buf+sizeof(buf)-bufsize, fmt, len);
    bufsize -= len;

    /* Trim trailing whitespace off the combined format string */
    pj_strset(&fmt_st, buf, sizeof(buf)-bufsize);
    pj_strrtrim(&fmt_st);
    buf[fmt_st.slen] = '\0';

    /* Forward to the PJSIP logger with ffmpeg's original va_list */
    pj_log(LOG_SENDER, LOG_LEVEL, buf, vl);
}
예제 #5
0
	static void logCb(void *ptr, int level, const char *fmt, va_list vargs)
	{
		// Detect DRM-related warnings by their (as yet unformatted) text.
		if(level == AV_LOG_WARNING){

			/* HACK, can we extract this information from the headers structures somehow? */

			if(!strcmp(fmt, "DRM protected stream detected, decoding will likely fail!\n")){
				FlogI("DRM protected stream");
				drm = true;
			}

			else if(!strcmp(fmt, "Ext DRM protected stream detected, decoding will likely fail!\n")){
				FlogI("Ext DRM protected stream");
				drm = true;
			}

			else if(!strcmp(fmt, "Digital signature detected, decoding will likely fail!\n")){
				FlogI("Digitally signed stream");
				drm = true;
			}
		}

		if (level <= av_log_get_level()){
			char tmp[1024];
			// Bounded format: the original used vsprintf, which overflows
			// the 1024-byte stack buffer on long messages.
			vsnprintf(tmp, sizeof(tmp), fmt, vargs);
			tmp[sizeof(tmp) - 1] = '\0';

			FlogD("ffmpeg says: " << tmp);
		}
	}
예제 #6
0
파일: librtmp.c 프로젝트: AVbin/libav
/**
 * Open RTMP connection and verify that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath][ keyword=value]...
 *             where 'app' is first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             may be prefixed with "mp4:")
 *
 *             Additional RTMP library options may be appended as
 *             space-separated key-value pairs.
 */
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    LibRTMPContext *ctx = s->priv_data;
    RTMP *r = &ctx->rtmp;
    int rc = 0, level;
    char *filename = s->filename;

    /* Map libavutil verbosity onto librtmp log levels; unknown levels
     * (default) fall through to RTMP_LOGCRIT. */
    switch (av_log_get_level()) {
    default:
    case AV_LOG_FATAL:   level = RTMP_LOGCRIT;    break;
    case AV_LOG_ERROR:   level = RTMP_LOGERROR;   break;
    case AV_LOG_WARNING: level = RTMP_LOGWARNING; break;
    case AV_LOG_INFO:    level = RTMP_LOGINFO;    break;
    case AV_LOG_VERBOSE: level = RTMP_LOGDEBUG;   break;
    case AV_LOG_DEBUG:   level = RTMP_LOGDEBUG2;  break;
    }
    RTMP_LogSetLevel(level);
    RTMP_LogSetCallback(rtmp_log);

    /* librtmp takes options appended to the URL as space-separated
     * key=value pairs, so build an extended filename when app/playpath
     * overrides were supplied.  sizeof(" app=") counts the NUL too, so
     * the allocation is slightly over-sized -- harmless. */
    if (ctx->app || ctx->playpath) {
        int len = strlen(s->filename) + 1;
        if (ctx->app)      len += strlen(ctx->app)      + sizeof(" app=");
        if (ctx->playpath) len += strlen(ctx->playpath) + sizeof(" playpath=");

        if (!(filename = av_malloc(len)))
            return AVERROR(ENOMEM);

        av_strlcpy(filename, s->filename, len);
        if (ctx->app) {
            av_strlcat(filename, " app=", len);
            av_strlcat(filename, ctx->app, len);
        }
        if (ctx->playpath) {
            av_strlcat(filename, " playpath=", len);
            av_strlcat(filename, ctx->playpath, len);
        }
    }

    RTMP_Init(r);
    if (!RTMP_SetupURL(r, filename)) {
        rc = AVERROR_UNKNOWN;
        goto fail;
    }

    if (flags & AVIO_FLAG_WRITE)
        RTMP_EnableWrite(r);

    if (!RTMP_Connect(r, NULL) || !RTMP_ConnectStream(r, 0)) {
        rc = AVERROR_UNKNOWN;
        goto fail;
    }

    s->is_streamed = 1;
    rc = 0;
fail:
    /* Free only if we allocated the extended filename above; the success
     * path also lands here with rc == 0. */
    if (filename != s->filename)
        av_freep(&filename);
    return rc;
}
// TODO, remove static variables to support multi-instances
// Accumulates ffmpeg log fragments into whole lines and emits them via LOGI.
void nam_av_log_callback(void* ptr, int level, const char* fmt, va_list vl)
{
    static int print_prefix = 1;
    static int count;
    static char prev[1024];
    char line[1024];
    static int is_atty;

    if (level > av_log_get_level())
        return;
    av_log_format_line(ptr, level, fmt, vl, line, sizeof(line), &print_prefix);

    /* NOTE(review): `flags` is not declared in this function; presumably a
     * file-scope variable mirroring av_log's flags -- confirm it exists. */
    if (print_prefix && (flags & AV_LOG_SKIP_REPEATED) && !strcmp(line, prev)){
        count++;
        return;
    }
    if (count > 0) {
        LOGI("Last message repeated %d times\n", count);
        count = 0;
    }
    strcpy(prev, line);
    sanitize((uint8_t *)line);

#if 0
    LOGI("%s", line);
#else
#define LOG_BUF_SIZE 1024
    static char g_msg[LOG_BUF_SIZE];
    static int g_msg_len = 0;

    int saw_lf, check_len;

    do {
        size_t line_len = strlen(line);
        check_len = g_msg_len + (int)line_len + 1;
        if (check_len <= LOG_BUF_SIZE) {
            /* lf: Line feed ('\n') */
            saw_lf = (strchr(line, '\n') != NULL) ? 1 : 0;
            memcpy(g_msg + g_msg_len, line, line_len);
            g_msg_len += (int)line_len;
            g_msg[g_msg_len] = '\0';   /* explicit termination (was implicit via memset) */
            if (!saw_lf) {
               /* no newline yet: keep accumulating fragments */
               return;
            }
            /* `line` already ends in the newline we saw, so there is nothing
             * to append.  (The original bumped g_msg_len first and then wrote
             * '\n' *past* the NUL terminator: a dead write that could also
             * land one byte out of bounds at the buffer edge.) */
        } else {
            /* `line` will not fit: flush the fragment collected so far,
             * newline-terminated, then loop to place the pending line. */
            if (g_msg_len < LOG_BUF_SIZE - 1) {
                g_msg[g_msg_len] = '\n';
                g_msg_len += 1;
            }
            g_msg[g_msg_len] = '\0';
        }
        LOGI("%s", g_msg);
        /* reset g_msg and g_msg_len */
        memset(g_msg, 0, LOG_BUF_SIZE);
        g_msg_len = 0;
     } while (check_len > LOG_BUF_SIZE);
#endif
}
예제 #8
0
/**
 * Open RTMP connection and verify that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath][ keyword=value]...
 *             where 'app' is first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             may be prefixed with "mp4:")
 *
 *             Additional RTMP library options may be appended as
 *             space-separated key-value pairs.
 */
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
	RTMP *rtmp = av_mallocz(sizeof(RTMP));
	int log_level;
	int ret;

	if (!rtmp)
		return AVERROR(ENOMEM);

	/* Translate libavutil verbosity into librtmp's log levels; anything
	 * unrecognized falls back to RTMP_LOGCRIT. */
	switch (av_log_get_level()) {
	case AV_LOG_DEBUG:   log_level = RTMP_LOGDEBUG2;  break;
	case AV_LOG_VERBOSE: log_level = RTMP_LOGDEBUG;   break;
	case AV_LOG_INFO:    log_level = RTMP_LOGINFO;    break;
	case AV_LOG_WARNING: log_level = RTMP_LOGWARNING; break;
	case AV_LOG_ERROR:   log_level = RTMP_LOGERROR;   break;
	case AV_LOG_FATAL:
	default:             log_level = RTMP_LOGCRIT;    break;
	}
	RTMP_LogSetLevel(log_level);
	RTMP_LogSetCallback(rtmp_log);

	RTMP_Init(rtmp);
	if (!RTMP_SetupURL(rtmp, s->filename)) {
		ret = -1;
		goto fail;
	}

	if (flags & AVIO_WRONLY)
		RTMP_EnableWrite(rtmp);

	/* Connect and make sure the stream is actually playable. */
	if (!RTMP_Connect(rtmp, NULL) || !RTMP_ConnectStream(rtmp, 0)) {
		ret = -1;
		goto fail;
	}

	s->priv_data   = rtmp;
	s->is_streamed = 1;
	return 0;
fail:
	av_free(rtmp);
	return ret;
}
예제 #9
0
/**
 * Open RTMFP connection and verify that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath][ keyword=value]...
 *             where 'app' is first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             may be prefixed with "mp4:")
 *
 *             Additional RTMFP library options may be appended as
 *             space-separated key-value pairs.
 */
static int rtmfp_open(URLContext *s, const char *uri, int flags)
{
    LibRTMFPContext *ctx = s->priv_data;
    int level = 0, res = 0;
    size_t url_size = strlen(uri) + 1;
    char *url = av_malloc(url_size);

    if (!url)
        return AVERROR(ENOMEM);
    /* Copy the URI verbatim.  The original did snprintf(url, n, uri),
     * which treats the caller-supplied URI as a printf *format* string:
     * any '%' in the URL corrupts the copy, and it is a classic
     * format-string vulnerability.  (The malloc was also unchecked.) */
    memcpy(url, uri, url_size);

    /* Map libavutil verbosity onto librtmfp's numeric log levels. */
    switch (av_log_get_level()) {
        case AV_LOG_FATAL:   level = 1; break;
        case AV_LOG_ERROR:   level = 3; break;
        case AV_LOG_WARNING: level = 4; break;
        default:
        case AV_LOG_INFO:    level = 6; break;
        case AV_LOG_DEBUG:   level = 7; break;
        case AV_LOG_VERBOSE: level = 8; break;
        case AV_LOG_TRACE:   level = 8; break;
    }
    RTMFP_Init(&ctx->rtmfp, &ctx->group);
    ctx->rtmfp.pOnSocketError = onSocketError;
    ctx->rtmfp.pOnStatusEvent = onStatusEvent;
    ctx->rtmfp.isBlocking = 1;

    RTMFP_LogSetLevel(level);
    RTMFP_LogSetCallback(rtmfp_log);
    /*RTMFP_ActiveDump();
    RTMFP_DumpSetCallback(rtmfp_dump);*/
    RTMFP_InterruptSetCallback(s->interrupt_callback.callback, s->interrupt_callback.opaque);

    /* NOTE(review): this may leave ctx->publication pointing into `url`,
     * so `url` is deliberately not freed here (matching the original) --
     * confirm ownership against the librtmfp API before adding a free. */
    RTMFP_GetPublicationAndUrlFromUri(url, &ctx->publication);

    if ((ctx->id = RTMFP_Connect(url, &ctx->rtmfp)) == 0)
        return -1;

    av_log(NULL, AV_LOG_INFO, "RTMFP Connect called : %d\n", ctx->id);
    if (ctx->netgroup) {
        ctx->group.netGroup = ctx->netgroup;
        ctx->group.availabilityUpdatePeriod = ctx->updatePeriod;
        ctx->group.windowDuration = ctx->windowDuration;
        ctx->group.pushLimit = ctx->pushLimit;
        /* AVIO_FLAG_WRITE is bit 2, so "> 1" behaves as "write flag set";
         * presumably intended as a boolean -- verify against callers. */
        ctx->group.isPublisher = (flags & AVIO_FLAG_WRITE) > 1;
        ctx->group.isBlocking = 1;
        res = RTMFP_Connect2Group(ctx->id, ctx->publication, &ctx->group);
    } else if (ctx->peerId)
        res = RTMFP_Connect2Peer(ctx->id, ctx->peerId, ctx->publication);
    else if (ctx->p2pPublishing)
        res = RTMFP_PublishP2P(ctx->id, ctx->publication, !ctx->audioUnbuffered, !ctx->videoUnbuffered, 1);
    else if (flags & AVIO_FLAG_WRITE)
        res = RTMFP_Publish(ctx->id, ctx->publication, !ctx->audioUnbuffered, !ctx->videoUnbuffered, 1);
    else
        res = RTMFP_Play(ctx->id, ctx->publication);

    if (!res)
        return -1;

    s->is_streamed = 1;
    return 0;
}
예제 #10
0
static void http_av_log(void *ptr, int level, const char *fmt, va_list vargs)
{
    /* Tracks whether the next chunk begins a new log line (prefix needed). */
    static int print_prefix = 1;
    AVClass *avc = ptr ? *(AVClass **)ptr : NULL;

    if (level > av_log_get_level())
        return;

    if (print_prefix && avc)
        http_log("[%s @ %p]", avc->item_name(ptr), ptr);

    /* A newline anywhere in the format means the next call starts fresh. */
    print_prefix = (strchr(fmt, '\n') != NULL);
    http_vlog(fmt, vargs);
}
//------------------------------------------------------------------------------
// This is a slightly modified version of the default ffmpeg log callback
//------------------------------------------------------------------------------
// This is a slightly modified version of the default ffmpeg log callback
void log_callback(void* ptr, int level, const char* fmt, va_list vl)
{
    static int print_prefix = 1;
    static int repeat_count;
    static char previous[1024];
    char formatted[1024];

    // Discard anything below the configured verbosity.
    if (level > av_log_get_level())
    {
        return;
    }

    // Let FFmpeg render the prefix + message into a single line.
    av_log_format_line(ptr, level, fmt, vl, formatted, 1024, &print_prefix);

    // Collapse consecutive duplicate lines (unless terminated with '\r').
    bool is_repeat = print_prefix
        && strcmp(formatted, previous) == 0
        && formatted[0] != '\0'
        && formatted[strlen(formatted) - 1] != '\r';
    if (is_repeat)
    {
        repeat_count++;
        return;
    }

    std::string message = "";
    if (repeat_count > 0)
    {
        message += "    Last message repeated "
                 + boost::lexical_cast<std::string>(repeat_count)
                 + " times\n";
        repeat_count = 0;
    }
    strcpy(previous, formatted);
    message += formatted;

    // Print/log the message via Ogre when available, stdout otherwise.
    if (!staticOgreLog)
    {
        std::cout << message << std::endl;
    }
    else
    {
        staticOgreLog->logMessage(message, Ogre::LML_NORMAL);
    }
}
예제 #12
0
파일: avbin.c 프로젝트: TimSC/AVbin
/**
 * Format log messages and call the user log callback.  Essentially a
 * reimplementation of libavutil/log.c:av_log_default_callback.
 */
/**
 * Format log messages and call the user log callback.  Essentially a
 * reimplementation of libavutil/log.c:av_log_default_callback.
 */
static void avbin_log_callback(void *ptr,
                               int level,
                               const char *fmt,
                               va_list vl)
{
    /* NOTE(review): one static buffer makes this callback non-reentrant;
     * confirm av_log is never invoked from multiple threads here. */
    static char message[8192];

    if (level > av_log_get_level())
        return;
    if (!user_log_callback)
        return;

    const char *module = NULL;
    if (ptr) {
        AVClass *avc = *(AVClass **) ptr;
        module = avc->item_name(ptr);
    }

    vsnprintf(message, sizeof(message), fmt, vl);
    user_log_callback(module, (AVbinLogLevel) level, message);
}
예제 #13
0
파일: librtmp.c 프로젝트: 411697643/FFmpeg
/**
 * Open RTMP connection and verify that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath][ keyword=value]...
 *             where 'app' is first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             may be prefixed with "mp4:")
 *
 *             Additional RTMP library options may be appended as
 *             space-separated key-value pairs.
 */
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    LibRTMPContext *ctx = s->priv_data;
    RTMP *r = &ctx->rtmp;
    int rc = 0, level;
    char *filename = s->filename;
    int len = strlen(s->filename) + 1;

    /* Map libavutil verbosity onto librtmp log levels; unknown levels
     * (default) fall through to RTMP_LOGCRIT. */
    switch (av_log_get_level()) {
    default:
    case AV_LOG_FATAL:   level = RTMP_LOGCRIT;    break;
    case AV_LOG_ERROR:   level = RTMP_LOGERROR;   break;
    case AV_LOG_WARNING: level = RTMP_LOGWARNING; break;
    case AV_LOG_INFO:    level = RTMP_LOGINFO;    break;
    case AV_LOG_VERBOSE: level = RTMP_LOGDEBUG;   break;
    case AV_LOG_DEBUG:   level = RTMP_LOGDEBUG2;  break;
    }
    RTMP_LogSetLevel(level);
    RTMP_LogSetCallback(rtmp_log);

    /* Pre-compute an upper bound on the extended filename.  sizeof("...")
     * includes the NUL of each literal, so `len` slightly over-estimates --
     * intentional slack, never an under-allocation. */
    if (ctx->app)      len += strlen(ctx->app)      + sizeof(" app=");
    if (ctx->tcurl)    len += strlen(ctx->tcurl)    + sizeof(" tcUrl=");
    if (ctx->pageurl)  len += strlen(ctx->pageurl)  + sizeof(" pageUrl=");
    if (ctx->flashver) len += strlen(ctx->flashver) + sizeof(" flashver=");

    if (ctx->conn) {
        char *sep, *p = ctx->conn;
        int options = 0;

        /* Count the space-separated entries in ctx->conn: each one gets
         * its own " conn=" prefix in the built string below. */
        while (p) {
            options++;
            p += strspn(p, " ");
            if (!*p)
                break;
            sep = strchr(p, ' ');
            if (sep)
                p = sep + 1;
            else
                break;
        }
        len += options * sizeof(" conn=");
        len += strlen(ctx->conn);
    }

    if (ctx->playpath)
        len += strlen(ctx->playpath) + sizeof(" playpath=");
    if (ctx->live)
        len += sizeof(" live=1");
    if (ctx->subscribe)
        len += strlen(ctx->subscribe) + sizeof(" subscribe=");

    if (ctx->client_buffer_time)
        len += strlen(ctx->client_buffer_time) + sizeof(" buffer=");

    if (ctx->swfurl || ctx->swfverify) {
        len += sizeof(" swfUrl=");

        if (ctx->swfverify)
            len += strlen(ctx->swfverify) + sizeof(" swfVfy=1");
        else
            len += strlen(ctx->swfurl);
    }

    /* ctx->temp_filename keeps ownership so the fail path (and close)
     * can release it with av_freep. */
    if (!(ctx->temp_filename = filename = av_malloc(len)))
        return AVERROR(ENOMEM);

    /* Build the librtmp URL: the original filename followed by
     * space-separated key=value option pairs. */
    av_strlcpy(filename, s->filename, len);
    if (ctx->app) {
        av_strlcat(filename, " app=", len);
        av_strlcat(filename, ctx->app, len);
    }
    if (ctx->tcurl) {
        av_strlcat(filename, " tcUrl=", len);
        av_strlcat(filename, ctx->tcurl, len);
    }
    if (ctx->pageurl) {
        av_strlcat(filename, " pageUrl=", len);
        av_strlcat(filename, ctx->pageurl, len);
    }
    if (ctx->swfurl) {
        av_strlcat(filename, " swfUrl=", len);
        av_strlcat(filename, ctx->swfurl, len);
    }
    if (ctx->flashver) {
        av_strlcat(filename, " flashVer=", len);
        av_strlcat(filename, ctx->flashver, len);
    }
    if (ctx->conn) {
        /* NOTE: writes '\0' into ctx->conn to split the entries, i.e. this
         * mutates the option string in place. */
        char *sep, *p = ctx->conn;
        while (p) {
            av_strlcat(filename, " conn=", len);
            p += strspn(p, " ");
            if (!*p)
                break;
            sep = strchr(p, ' ');
            if (sep)
                *sep = '\0';
            av_strlcat(filename, p, len);

            if (sep)
                p = sep + 1;
            else
                break;
        }
    }
    if (ctx->playpath) {
        av_strlcat(filename, " playpath=", len);
        av_strlcat(filename, ctx->playpath, len);
    }
    if (ctx->live)
        av_strlcat(filename, " live=1", len);
    if (ctx->subscribe) {
        av_strlcat(filename, " subscribe=", len);
        av_strlcat(filename, ctx->subscribe, len);
    }
    if (ctx->client_buffer_time) {
        av_strlcat(filename, " buffer=", len);
        av_strlcat(filename, ctx->client_buffer_time, len);
    }
    if (ctx->swfurl || ctx->swfverify) {
        av_strlcat(filename, " swfUrl=", len);

        /* swfverify implies hash/size verification of the swf (swfVfy=1). */
        if (ctx->swfverify) {
            av_strlcat(filename, ctx->swfverify, len);
            av_strlcat(filename, " swfVfy=1", len);
        } else {
            av_strlcat(filename, ctx->swfurl, len);
        }
    }

    RTMP_Init(r);
    if (!RTMP_SetupURL(r, filename)) {
        rc = AVERROR_UNKNOWN;
        goto fail;
    }

    if (flags & AVIO_FLAG_WRITE)
        RTMP_EnableWrite(r);

    if (!RTMP_Connect(r, NULL) || !RTMP_ConnectStream(r, 0)) {
        rc = AVERROR_UNKNOWN;
        goto fail;
    }

#if CONFIG_NETWORK
    /* Optionally enlarge the socket send buffer for publishing. */
    if (ctx->buffer_size >= 0 && (flags & AVIO_FLAG_WRITE)) {
        int tmp = ctx->buffer_size;
        setsockopt(r->m_sb.sb_socket, SOL_SOCKET, SO_SNDBUF, &tmp, sizeof(tmp));
    }
#endif

    s->is_streamed = 1;
    return 0;
fail:
    av_freep(&ctx->temp_filename);
    if (rc)
        RTMP_Close(r);

    return rc;
}
      void FormatBaseStream::mhive_log_default_callback(void* ptr, int level, const char* fmt, va_list vl) {
        // Serialize invocations: ffmpeg may log concurrently from decoders.
        boost::mutex::scoped_lock log_lock(log_mutex);

        static int print_prefix = 1;
        static int count;
        static char line[4096], prev[4096];
        //char ptrString[10];
        //char ptrLine[4096];
        AVClass* avc = ptr ? *(AVClass**) ptr : NULL;
#undef fprintf
#ifdef __WIN32__
#define snprintf _snprintf
#endif
        // NOTE(review): print_prefix is set to 1 and never updated, so this
        // condition reduces to "avc != NULL" -- confirm that is intended
        // (count/prev are likewise never used below).
        if (print_prefix && avc) {
          snprintf(line, sizeof (line), "[%s @ %p]", avc->item_name(ptr), ptr);
          //snprintf(ptrString, sizeof (ptrString), "%p", ptr);
        } else {
          line[0] = 0;
          //ptrString[0] = 0;
          // NOTE(review): messages without an AVClass context are silently
          // dropped here -- verify that discarding them is deliberate.
          return;
        }

        // Append the formatted message after the "[name @ ptr]" prefix and
        // strip trailing newlines before handing off to the logger macros.
        vsnprintf(line + strlen(line), sizeof (line) - strlen(line), fmt, vl);
        std::string msg = org::esb::util::StringUtil::trim(line, "\n");
        //std::string msgPtr = org::esb::util::StringUtil::trim(line, "\n");
        /*
        if (logMap.count(ptrString)) {
          if (logMap[ptrString].size() > MAX_HISTORY) {
            logMap[ptrString].erase(--logMap[ptrString].end());
          }
        }
        logMap[ptrString].push_front(msgPtr);
        */
        /*filter out unwanted messages by loglevel*/
        try{
          if(level>av_log_get_level())return;
          switch (level) {
            case AV_LOG_DEBUG:
              LOGDEBUG(msg);
              break;
            case AV_LOG_INFO:
              LOGINFO(msg);
              break;
            case AV_LOG_ERROR:
              LOGERROR(msg);
              break;
            case AV_LOG_WARNING:
              LOGWARN(msg);
              break;
            case AV_LOG_PANIC:
              LOGFATAL(msg);
              break;
            case AV_LOG_VERBOSE:
              LOGTRACE(msg);
              break;
            default:
              LOGERROR("Unknown LogLevel:" << level << " - " << msg);
              break;
          }
        }catch(Poco::AssertionViolationException & ex){
          std::cout << "error logging"<< ex.displayText() <<std::endl;
          //LOGERROR("error in logging")
        }
      }
// Demo driver: encodes PCM read from a wav file with AMR-WB via ffmpeg,
// emulating the parameters that would normally arrive from Matlab (mex).
int main(int argc, char **argv){

    /* parameters expected from matlab as arguments, ideally as pointers: */
//	Buffer_in_int16_t *ST_buffer_in;

  	enum AVCodecID nFromMatlab_codec_id;//as defined in avcodec.h: http://ffmpeg.org/doxygen/trunk/libavcodec_2avcodec_8h_source.html
	nFromMatlab_codec_id=AV_CODEC_ID_AMR_WB;//http://ffmpeg.org/doxygen/trunk/group__lavc__core.html#ggaadca229ad2c20e060a14fec08a5cc7cea3d4c051c8b18a3c18750676a308c10be

    int nFromMatlab_Codec_bit_rate=23850;//WB: 23850,23050,19850,....,8850,6600  //Output bit_rate
                                         //NB: 12200,10200,7950,....5150,4750,1800(SID) http://en.wikipedia.org/wiki/Adaptive_Multi-Rate_audio_codec
    enum AVSampleFormat nFromMatlab_Codec_AV_SAMPLE_FMT;
    nFromMatlab_Codec_AV_SAMPLE_FMT=AV_SAMPLE_FMT_S16;//for amr is ignored, we hardcode to AV_SAMPLE_FMT_S16.
                                                      //as defined in samplefmt.h: http://ffmpeg.org/doxygen/trunk/samplefmt_8h_source.html
    int nFromMatlab_Codec_channels=1;           //for amr is ignored, we hardcode to 1 channel.


    int16_t *pnFromMatlab_audio_bufin_samples=NULL;//for demo, this will be init later, from a wav file...
    int nFromMatlab_audio_bufin_byte_size=-1;//for demo, this will be init later, based on frames to be read from file.

    int log_level=AV_LOG_VERBOSE;

    /* END Parameters expected from Matlab */

    av_log_set_level(log_level);
    av_log(NULL,AV_LOG_WARNING,"\n Starting Our code... \n");

	uint8_t *audio_outbuf;
	AVCodecContext *c;

    c = prepare_codec_context(nFromMatlab_codec_id,ENCODE,&nFromMatlab_Codec_bit_rate);

	///DEMO (FOR AMRWB):
	int number_of_frames=1;//Number of frames to be read from file.; 150*320=48000; 8kHz=> 6 sec.

	char *filename_wav=argv[1];
	// NOTE(review): the command-line argument is immediately overridden by
	// this hardcoded absolute path -- debug leftover, confirm before release.
	filename_wav="c:\\_me\\PhD2014_ffmpeg\\mex\\amrwb2_retry\\a1_16khz.wav"; number_of_frames=150;
	//filename_wav="c:\\_me\\PhD2014_ffmpeg\\mex\\amrwb2_retry\\684.wav";number_of_frames=1;

	nFromMatlab_audio_bufin_byte_size=number_of_frames*c->frame_size*av_get_bytes_per_sample(c->sample_fmt);
	pnFromMatlab_audio_bufin_samples=generate_audio_frame_from_file(filename_wav,&nFromMatlab_audio_bufin_byte_size);
	//bytes_to_be_read_from_file=(*ST_buffer_in).size_of_buff;
	//mergeOK: write_raw_to_file((*ST_buffer_in).audio_bufin_samples, "before_amr_wb", 1, bytes_to_be_read_from_file);

	//audio_bufin_samples=malloc(bytes_to_be_read_from_file);
	//audio_bufin_samples=memcpy(audio_bufin_samples,(*ST_buffer_in).audio_bufin_samples, bytes_to_be_read_from_file);

	///END DEMO (FOR AMRWB)


	/// Performing magic...
	audio_outbuf=main_encode2(c,pnFromMatlab_audio_bufin_samples,nFromMatlab_audio_bufin_byte_size);

    if (av_log_get_level()>=AV_LOG_DEBUG)
        dump_codec_context(c); //for debug

    //If matlab pointers, don't free them! :)
    close_free(c, pnFromMatlab_audio_bufin_samples, audio_outbuf);

    // NOTE(review): exit_program(0) presumably never returns, making the
    // trailing `return 0` unreachable -- confirm its semantics.
    exit_program(0);

return  0;

} 
예제 #16
0
//----------------------------------------------------------------
// main_utf8
// 
int main_utf8(int argc, char **argv)
{
    const char *input = NULL;
    const char *output_prefix = "";
    double target_segment_duration = 0.0;
    char *segment_duration_check = NULL;
    const char *playlist_filename = NULL;
    const char *http_prefix = "";
    long max_tsfiles = 0;
    char *max_tsfiles_check = NULL;
    double prev_segment_time = 0.0;
    double segment_duration = 0.0;
    unsigned int output_index = 0;
    const AVClass *fc = avformat_get_class();
    AVDictionary *format_opts = NULL;
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ic = NULL;
    AVFormatContext *oc = NULL;
    AVStream *video_st = NULL;
    AVStream *audio_st = NULL;
    AVCodec *codec = NULL;
    char *output_filename = NULL;
    char *pid_filename = NULL;
    int video_index = -1;
    int audio_index = -1;
    int kill_file = 0;
    int decode_done = 0;
    int ret = 0;
    int i = 0;
    TSMStreamLace * streamLace = NULL;
    TSMPlaylist * playlist = NULL;
    const double segment_duration_error_tolerance = 0.05;
    double extra_duration_needed = 0;
    int strict_segment_duration = 0;
    
    av_log_set_level(AV_LOG_INFO);
    
    for (int i = 1; i < argc; i++)
    {
        if (strcmp(argv[i], "-i") == 0)
        {
            if ((argc - i) <= 1) usage(argv, "could not parse -i parameter");
            i++;
            input = argv[i];
        }
        else if (strcmp(argv[i], "-o") == 0)
        {
            if ((argc - i) <= 1) usage(argv, "could not parse -i parameter");
            i++;
            output_prefix = argv[i];
        }
        else if (strcmp(argv[i], "-d") == 0)
        {
            if ((argc - i) <= 1) usage(argv, "could not parse -d parameter");
            i++;
            
            target_segment_duration = strtod(argv[i], &segment_duration_check);
            if (segment_duration_check == argv[i] ||
                target_segment_duration == HUGE_VAL ||
                target_segment_duration == -HUGE_VAL)
            {
                usage3(argv, "invalid segment duration: ", argv[i]);
            }
        }
        else if (strcmp(argv[i], "-x") == 0)
        {
            if ((argc - i) <= 1) usage(argv, "could not parse -x parameter");
            i++;
            playlist_filename = argv[i];
        }
        else if (strcmp(argv[i], "-p") == 0)
        {
            if ((argc - i) <= 1) usage(argv, "could not parse -p parameter");
            i++;
            http_prefix = argv[i];
        }
        else if (strcmp(argv[i], "-w") == 0)
        {
            if ((argc - i) <= 1) usage(argv, "could not parse -w parameter");
            i++;

            max_tsfiles = strtol(argv[i], &max_tsfiles_check, 10);
            if (max_tsfiles_check == argv[i] ||
                max_tsfiles < 0 ||
                max_tsfiles >= INT_MAX)
            {
                usage3(argv, "invalid live stream max window size: ", argv[i]);
            }
        }
        else if (strcmp(argv[i], "-P") == 0)
        {
            if ((argc - i) <= 1) usage(argv, "could not parse -P parameter");
            i++;
            pid_filename = argv[i];
        }
        else if (strcmp(argv[i], "--watch-for-kill-file") == 0)
        {
            // end program when it finds a file with name 'kill':
            kill_file = 1;
        }
        else if (strcmp(argv[i], "--strict-segment-duration") == 0)
        {
            // force segment creation on non-keyframe boundaries:
            strict_segment_duration = 1;
        }
        else if (strcmp(argv[i], "--avformat-option") == 0)
        {
            const AVOption *of;
            const char *opt;
            const char *arg;
            if ((argc - i) <= 1) usage(argv, "could not parse --avformat-option parameter");
            i++;
            opt = argv[i];
            if ((argc - i) <= 1) usage(argv, "could not parse --avformat-option parameter");
            i++;
            arg = argv[i];

            if ((of = av_opt_find(&fc, opt, NULL, 0,
                                  AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
                av_dict_set(&format_opts, opt, arg, (of->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0);
            else
                usage3(argv, "unknown --avformat-option parameter: ", opt);
        }
        else if (strcmp(argv[i], "--loglevel") == 0)
        {
            const char *arg;
            if ((argc - i) <= 1) usage(argv, "could not parse --loglevel parameter");
            i++;
            arg = argv[i];

            if (loglevel(arg))
                usage3(argv, "unknown --loglevel parameter: ", arg);
        }
    }
    
    if (!input)
    {
        usage(argv, "-i input file parameter must be specified");
    }
    
    if (!playlist_filename)
    {
        usage(argv, "-x m3u8 playlist file parameter must be specified");
    }
    
    if (target_segment_duration == 0.0)
    {
        usage(argv, "-d segment duration parameter must be specified");
    }
    
    // Create PID file
    if (pid_filename)
    {
        FILE* pid_file = fopen_utf8(pid_filename, "wb");
        if (pid_file)
        {
            fprintf(pid_file, "%d", getpid());
            fclose(pid_file);
        }
    }

    av_register_all();
    avformat_network_init();

    if (!strcmp(input, "-")) {
        input = "pipe:";
    }
    
    output_filename = malloc(sizeof(char) * (strlen(output_prefix) + 15));
    if (!output_filename) {
        fprintf(stderr, "Could not allocate space for output filenames\n");
        goto error;
    }

    playlist = createPlaylist(max_tsfiles,
                              target_segment_duration,
                              http_prefix);
    if (!playlist)
    {
        fprintf(stderr, "Could not allocate space for m3u8 playlist structure\n");
        goto error;
    }

    ret = avformat_open_input(&ic, input, NULL, (format_opts) ? &format_opts : NULL);
    if (ret != 0) {
        fprintf(stderr, "Could not open input file, make sure it is an mpegts or mp4 file: %d\n", ret);
        goto error;
    }
    av_dict_free(&format_opts);

    if (avformat_find_stream_info(ic, NULL) < 0) {
        fprintf(stderr, "Could not read stream information\n");
        goto error;
    }

#if LIBAVFORMAT_VERSION_MAJOR > 52 || (LIBAVFORMAT_VERSION_MAJOR == 52 && \
                                       LIBAVFORMAT_VERSION_MINOR >= 45)
    ofmt = av_guess_format("mpegts", NULL, NULL);
#else
    ofmt = guess_format("mpegts", NULL, NULL);
#endif
    
    if (!ofmt) {
        fprintf(stderr, "Could not find MPEG-TS muxer\n");
        goto error;
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Could not allocated output context\n");
        goto error;
    }
    oc->oformat = ofmt;

    video_index = -1;
    audio_index = -1;

    for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) {
        switch (ic->streams[i]->codec->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                video_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                video_st = add_output_stream(oc, ic->streams[i]);
                break;
            case AVMEDIA_TYPE_AUDIO:
                audio_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                audio_st = add_output_stream(oc, ic->streams[i]);
                break;
            default:
                ic->streams[i]->discard = AVDISCARD_ALL;
                break;
        }
    }

    av_dump_format(oc, 0, output_prefix, 1);
    
    if (video_index >=0) {
      codec = avcodec_find_decoder(video_st->codec->codec_id);
      if (!codec) {
        fprintf(stderr, "Could not find video decoder, key frames will not be honored\n");
      }

      if (avcodec_open2(video_st->codec, codec, NULL) < 0) {
        fprintf(stderr, "Could not open video decoder, key frames will not be honored\n");
      }
    }

    snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts", output_prefix, ++output_index);
    if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
        fprintf(stderr, "Could not open '%s'\n", output_filename);
        goto error;
    }

    if (avformat_write_header(oc, NULL)) {
        fprintf(stderr, "Could not write mpegts header to first output file\n");
        goto error;
    }

    prev_segment_time = (double)(ic->start_time) / (double)(AV_TIME_BASE);

    streamLace = createStreamLace(ic->nb_streams);
    
    do {
        double segment_time = 0.0;
        AVPacket packet;
        double packetStartTime = 0.0;
        double packetDuration = 0.0;
        
        if (!decode_done)
        {
            decode_done = av_read_frame(ic, &packet);
            if (!decode_done)
            {
                if (packet.stream_index != video_index &&
                    packet.stream_index != audio_index)
                {
                    av_free_packet(&packet);
                    continue;
                }
                
                double timeStamp = 
                    (double)(packet.pts) * 
                    (double)(ic->streams[packet.stream_index]->time_base.num) /
                    (double)(ic->streams[packet.stream_index]->time_base.den);
                
                if (av_dup_packet(&packet) < 0)
                {
                    fprintf(stderr, "Could not duplicate packet\n");
                    av_free_packet(&packet);
                    break;
                }
                
                insertPacket(streamLace, &packet, timeStamp);
            }
        }
        
        if (countPackets(streamLace) < 50 && !decode_done)
        {
            /* allow the queue to fill up so that the packets can be sorted properly */
            continue;
        }
        
        if (!removePacket(streamLace, &packet))
        {
            if (decode_done)
            {
                /* the queue is empty, we are done */
                break;
            }
            
            assert(decode_done);
            continue;
        }
        
        packetStartTime = 
            (double)(packet.pts) * 
            (double)(ic->streams[packet.stream_index]->time_base.num) /
            (double)(ic->streams[packet.stream_index]->time_base.den);
        
        packetDuration =
            (double)(packet.duration) *
            (double)(ic->streams[packet.stream_index]->time_base.num) /
            (double)(ic->streams[packet.stream_index]->time_base.den);
        
#if !defined(NDEBUG) && (defined(DEBUG) || defined(_DEBUG))
        if (av_log_get_level() >= AV_LOG_VERBOSE)
            fprintf(stderr,
                    "stream %i, packet [%f, %f)\n",
                    packet.stream_index,
                    packetStartTime,
                    packetStartTime + packetDuration);
#endif

        segment_duration = packetStartTime + packetDuration - prev_segment_time;

        // NOTE: segments are supposed to start on a keyframe.
        // If the keyframe interval and segment duration do not match
        // forcing the segment creation for "better seeking behavior"
        // will result in decoding artifacts after seeking or stream switching.
        if (packet.stream_index == video_index && (packet.flags & AV_PKT_FLAG_KEY || strict_segment_duration)) {
            segment_time = packetStartTime;
        }
        else if (video_index < 0) {
            segment_time = packetStartTime;
        }
        else {
            segment_time = prev_segment_time;
        }

        if (segment_time - prev_segment_time + segment_duration_error_tolerance >
            target_segment_duration + extra_duration_needed) 
        {
            avio_flush(oc->pb);
            avio_close(oc->pb);

            // Keep track of accumulated rounding error to account for it in later chunks.
            double segment_duration = segment_time - prev_segment_time;
            int rounded_segment_duration = (int)(segment_duration + 0.5);
            extra_duration_needed += (double)rounded_segment_duration - segment_duration;

            updatePlaylist(playlist,
                           playlist_filename,
                           output_filename,
                           output_index,
                           rounded_segment_duration);
            
            snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts", output_prefix, ++output_index);
            if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
                fprintf(stderr, "Could not open '%s'\n", output_filename);
                break;
            }

            // close when we find the 'kill' file
            if (kill_file) {
                FILE* fp = fopen("kill", "rb");
                if (fp) {
                    fprintf(stderr, "user abort: found kill file\n");
                    fclose(fp);
                    remove("kill");
                    decode_done = 1;
                    removeAllPackets(streamLace);
                }
            }
            prev_segment_time = segment_time;
        }

        ret = av_interleaved_write_frame(oc, &packet);
        if (ret < 0) {
            fprintf(stderr, "Warning: Could not write frame of stream\n");
        }
        else if (ret > 0) {
            fprintf(stderr, "End of stream requested\n");
            av_free_packet(&packet);
            break;
        }

        av_free_packet(&packet);
    } while (!decode_done || countPackets(streamLace) > 0);

    av_write_trailer(oc);

    if (video_index >= 0) {
      avcodec_close(video_st->codec);
    }

    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    avio_close(oc->pb);
    av_free(oc);

    updatePlaylist(playlist,
                   playlist_filename,
                   output_filename,
                   output_index,
                   segment_duration);
    closePlaylist(playlist);
    releasePlaylist(&playlist);
    
    if (pid_filename)
    {
        remove(pid_filename);
    }

    return 0;

error:
    if (pid_filename)
    {
        remove(pid_filename);
    }

    return 1;

}
예제 #17
0
파일: ffms.cpp 프로젝트: 1974kpkpkp/ffms2
FFMS_API(int) FFMS_GetLogLevel() {
	// Expose libavutil's current logging threshold through the public API.
	const int current_level = av_log_get_level();
	return current_level;
}
예제 #18
0
// UCI格式图像解码,目前只支持输出24位BGR和32位的BGRA,返回0表示调用成功,负数表示错误,不支持多线程同时访问
__declspec(dllexport) int __stdcall UCIDecode(
	const void* src,	// 输入UCI数据指针(不能传入null,其它指针参数可以传入null表示不需要输出)
	int 		srclen, // 输入UCI数据长度
	void**		dst,	// 输出RAW数据的指针(BGR或BGRA格式)
	int*		stride, // 输出RAW数据的行间字节跨度(dst不为null时,stride不能传入null)
	int*		width,	// 输出图像的宽度值
	int*		height, // 输出图像的高度值
	int*		bit)	// 输出图像的bpp值(每像素位数)
{
	extern AVCodec ff_h264_decoder;
	int ret = 0;
	U8* frame0 = 0;
	U8* frame1 = 0;
	U8* frame_data[4];
	int frame_size[4];
	int w, h, b, m;
	int ww, hh;
	int size, hasimg, bit10;
	U8* psrc, *pdst;
	const U8* srcend = (const U8*)src + srclen;

	if(dst	 ) *dst    = 0;
	if(stride) *stride = 0;
	if(width ) *width  = 0;
	if(height) *height = 0;
	if(bit	 ) *bit    = 0;
	if(!src || srclen < 0 || dst && !stride) return -1;
	if(srclen < 12) return -2;
	if((*(U32*)src & 0xffffff) != *(U32*)"UCI") return -3;
	switch(*((U8*)src + 3))
	{
	case '3':	b = 24; m = 0; break; // part range, YUV420
	case '4':	b = 32; m = 0; break; // part range, YUV420+A
	case 'T':	b = 24; m = 1; break; // part range, Y+U+V(420)
	case 'Q':	b = 32; m = 1; break; // part range, Y+U+V(420)+A
	case 0x20:	b = 24; m = 2; break; // full range, YUV420
	case 0x21:	b = 32; m = 2; break; // full range, YUV420+A
	case 0x40:	b = 24; m = 3; break; // full range, YUV444
	case 0x41:	b = 32; m = 3; break; // full range, YUV444+A
	default: return -4;
	}
	w = *(int*)((U8*)src + 4);
	h = *(int*)((U8*)src + 8);

	if(width ) *width  = w;
	if(height) *height = h;
	if(bit	 ) *bit    = b;
	if(stride) *stride = (m == 1 ? w : (w+7)&0xfffffff8) * (b/8);
	if(!dst) return 0;
	if(srclen < 12 + 4) return -5;
	ww = w + (w & ((m & 1) ^ 1));
	hh = h + (h & ((m & 1) ^ 1));

	frame_data[0] = (U8*)src + 12 + 4;
	frame_size[0] = *(int*)((U8*)src + 12);
	if(m != 1)
	{
		if(b == 24)
		{
			if(frame_size[0] < 0 || frame_data[0] + frame_size[0] > srcend)		return -10;
		}
		else
		{
			if(frame_size[0] < 0 || frame_data[0] + frame_size[0] + 4 > srcend) return -11;
			frame_data[3] = frame_data[0] + frame_size[0] + 4;
			frame_size[3] = *(int*)(frame_data[0] + frame_size[0]);
			if(frame_size[3] < 0 || frame_data[3] + frame_size[3] > srcend)		return -12;
		}
	}
	else
	{
		if(frame_size[0] < 0 || frame_data[0] + frame_size[0] + 4 > srcend)		return -13;
		frame_data[1] = frame_data[0] + frame_size[0] + 4;
		frame_size[1] = *(int*)(frame_data[0] + frame_size[0]);
		if(frame_size[1] < 0 || frame_data[1] + frame_size[1] + 4 > srcend)		return -14;
		frame_data[2] = frame_data[1] + frame_size[1] + 4;
		frame_size[2] = *(int*)(frame_data[1] + frame_size[1]);
		if(b == 24)
		{
			if(frame_size[2] < 0 || frame_data[2] + frame_size[2] > srcend)		return -15;
		}
		else
		{
			if(frame_size[2] < 0 || frame_data[2] + frame_size[2] + 4 > srcend) return -16;
			frame_data[3] = frame_data[2] + frame_size[2] + 4;
			frame_size[3] = *(int*)(frame_data[2] + frame_size[2]);
			if(frame_size[3] < 0 || frame_data[3] + frame_size[3] > srcend)		return -17;
		}
	}

	EnterCriticalSection(&g_cs);
	if(!g_frame && !(g_frame = avcodec_alloc_frame()))							{ ret = -20; goto end_; }
	if(!g_context && !(g_context = avcodec_alloc_context3(&ff_h264_decoder)))	{ ret = -21; goto end_; }
	g_context->flags &= ~CODEC_FLAG_EMU_EDGE;
	if(av_log_get_level() >= AV_LOG_DEBUG) g_context->debug = -1;
	if(avcodec_open2(g_context, &ff_h264_decoder, 0) < 0)						{ ret = -22; goto end_; }
	g_context->flags &= ~CODEC_FLAG_EMU_EDGE;
	g_packet.data = frame_data[0];
	g_packet.size = frame_size[0];
	size = avcodec_decode_video2(g_context, g_frame, &hasimg, &g_packet);
	if(size <= 0)																{ ret = -100 + size; goto end_; }
	if(!hasimg) 																{ ret = -23; goto end_; }
	if(g_context->width < ww || g_context->height < hh)							{ ret = -24; goto end_; }
	if(!g_frame->data[0] || m != 1 && (!g_frame->data[1] || !g_frame->data[2]))	{ ret = -25; goto end_; }
	if(g_frame->linesize[1] != g_frame->linesize[2])							{ ret = -26; goto end_; }
	bit10 = ((H264Context*)g_context->priv_data)->sps.bit_depth_luma;
	if(bit10 == 8) bit10 = 0; else if(bit10 != 10)								{ ret = -27; goto end_; }

	if(m != 1)
	{
		enum AVPixelFormat pfsrc, pfdst = (b == 24 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGRA);
		if(m != 3)	pfsrc = (bit10 ? AV_PIX_FMT_YUV420P10LE : AV_PIX_FMT_YUV420P);
		else		pfsrc = (bit10 ? AV_PIX_FMT_YUV444P10LE : AV_PIX_FMT_YUV444P);
		if(!(*dst = malloc(*stride * hh)))						{ ret = -28; goto end_; }
		g_swsctx = sws_getCachedContext(g_swsctx, ww, hh, pfsrc, ww, hh, pfdst, SWS_LANCZOS, 0, 0, 0);
		if(!g_swsctx)											{ ret = -29; goto end_; }
		sws_setColorspaceDetails(g_swsctx, g_cs_table, m != 0, g_cs_table, m != 0, 0, 1 << 16, 1 << 16);
		if(sws_scale(g_swsctx, g_frame->data, g_frame->linesize, 0, hh, dst, stride) != hh) { ret = -30; goto end_; }
	}
	else
	{
		if(!(frame0 = (U8*)malloc(g_frame->linesize[0] * h)))	{ ret = -31; goto end_; }
		memcpy(frame0, g_frame->data[0], g_frame->linesize[0] * h);
		frame_data[0] = frame0;
		frame_size[0] = g_frame->linesize[0];
		g_packet.data = frame_data[1];
		g_packet.size = frame_size[1];
		size = avcodec_decode_video2(g_context, g_frame, &hasimg, &g_packet);
		if(size <= 0)											{ ret = -200 + size; goto end_; }
		if(!hasimg) 											{ ret = -32; goto end_; }
		if(g_context->width < ww || g_context->height < hh)		{ ret = -33; goto end_; }
		if(!g_frame->data[0])									{ ret = -34; goto end_; }
		if(!(frame1 = (U8*)malloc(g_frame->linesize[0] * h)))	{ ret = -35; goto end_; }
		memcpy(frame1, g_frame->data[0], g_frame->linesize[0] * h);
		frame_data[1] = frame1;
		frame_size[1] = g_frame->linesize[0];
		g_packet.data = frame_data[2];
		g_packet.size = frame_size[2];
		size = avcodec_decode_video2(g_context, g_frame, &hasimg, &g_packet);
		if(size <= 0)											{ ret = -300 + size; goto end_; }
		if(!hasimg) 											{ ret = -36; goto end_; }
		if(g_context->width < ww || g_context->height < hh)		{ ret = -37; goto end_; }
		if(!g_frame->data[0])									{ ret = -38; goto end_; }
		frame_data[2] = g_frame->data[0];
		frame_size[2] = g_frame->linesize[0];
		if(!(*dst = malloc(*stride * h)))						{ ret = -39; goto end_; }
		if(b == 24) YUV444_BGR (*dst, w * 3, frame_data, frame_size, w, h);
		else		YUV444_BGRA(*dst, w * 4, frame_data, frame_size, w, h);
	}

	if(b == 32)
	{
		avcodec_close(g_context);
		if(!(g_context = avcodec_alloc_context3(&ff_h264_decoder)))	{ ret = -40; goto end_; }
		if(av_log_get_level() >= AV_LOG_DEBUG) g_context->debug = -1;
		if(avcodec_open2(g_context, &ff_h264_decoder, 0) < 0)	{ ret = -41; goto end_; }
		g_packet.data = frame_data[3];
		g_packet.size = frame_size[3];
		size = avcodec_decode_video2(g_context, g_frame, &hasimg, &g_packet);
		if(size <= 0)											{ ret = -400 + size; goto end_; }
		if(!hasimg) 											{ ret = -42; goto end_; }
		if(g_context->width < ww || g_context->height < hh)		{ ret = -43; goto end_; }
		if(!g_frame->data[0])									{ ret = -44; goto end_; }
		psrc = g_frame->data[0];
		pdst = (U8*)*dst + 3;
		if(!bit10)
			for(; h; --h)
			{
				for(m = 0; m < w; ++m)
					pdst[m * 4] = psrc[m];
				psrc += g_frame->linesize[0];
				pdst += *stride;
			}
		else
			for(; h; --h)
			{
				for(m = 0; m < w; ++m)
					pdst[m * 4] = ((U16*)psrc)[m] >> 2;
				psrc += g_frame->linesize[0];
				pdst += *stride;
			}
	}
예제 #19
-2
//----------------------------------------------------------------
// main_utf8
// 
int main_utf8(int argc, char **argv)
{
	const char *input = NULL;
	const char *output_prefix = "";
	double target_segment_duration = 0.0;
	char *segment_duration_check = NULL;
	const char *playlist_filename = NULL;
	const char *http_prefix = "";
	long max_tsfiles = 0;
	char *max_tsfiles_check = NULL;
	double prev_segment_time = 0.0;
	double segment_duration = 0.0;
	unsigned int output_index = 1;
	const AVClass *fc = avformat_get_class();
	AVDictionary *format_opts = NULL;
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ic = NULL;
	AVFormatContext *oc = NULL;
	AVStream *video_st = NULL;
	AVStream *audio_st = NULL;
	AVCodec *codec = NULL;
	char *output_filename = NULL; 	
	int if_save_keyframe = 0;			//add by wanggm
	char *keyframeinfo_filename = NULL;	//add by wanggm
	json_object *obj = NULL;			//add by wanggm
	json_object *info_arr_obj = NULL;	//add by wanggm

	int if_monitor_related_process = 0;	//add by wanggm
	pid_t relatedProcessPid = 1; 		//add by wanggm
	char *pid_filename = NULL;
	int video_index = -1;
	int audio_index = -1;
	int kill_file = 0;
	int decode_done = 0;
	int ret = 0;
	int i = 0;
	TSMStreamLace * streamLace = NULL;
	TSMPlaylist * playlist = NULL;
	const double segment_duration_error_tolerance = 0.05;
	double extra_duration_needed = 0;
	int strict_segment_duration = 0;

	av_log_set_level(AV_LOG_INFO);


	for (i = 1; i < argc; i++)
	{
		if (strcmp(argv[i], "-i") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -i parameter");
			i++;
			input = argv[i];
		}
		else if (strcmp(argv[i], "-o") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -o parameter");
			i++;
			output_prefix = argv[i];
		}
		else if (strcmp(argv[i], "-d") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -d parameter");
			i++;

			target_segment_duration = strtod(argv[i], &segment_duration_check);
			if (segment_duration_check
					== argv[i] || target_segment_duration == HUGE_VAL
					|| target_segment_duration == -HUGE_VAL){
				usage3(argv, "invalid segment duration: ", argv[i]);
			}
		}
		else if (strcmp(argv[i], "-x") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -x parameter");
			i++;
			playlist_filename = argv[i];
		}
		else if (strcmp(argv[i], "-p") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -p parameter");
			i++;
			http_prefix = argv[i];
		}
		else if (strcmp(argv[i], "-w") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -w parameter");
			i++;

			max_tsfiles = strtol(argv[i], &max_tsfiles_check, 10);
			if (max_tsfiles_check == argv[i] || max_tsfiles < 0
					|| max_tsfiles >= INT_MAX)
			{
				usage3(argv, "invalid live stream max window size: ", argv[i]);
			}
		}
		else if (strcmp(argv[i], "-P") == 0)
		{
			if ((argc - i) <= 1)
				usage(argv, "could not parse -P parameter");
			i++;
			pid_filename = argv[i];
		}
		else if (strcmp(argv[i], "--watch-for-kill-file") == 0)
		{
			// end program when it finds a file with name 'kill':
			kill_file = 1;
		}
		else if (strcmp(argv[i], "--strict-segment-duration") == 0)
		{
			// force segment creation on non-keyframe boundaries:
			strict_segment_duration = 1;
		}
		else if (strcmp(argv[i], "--avformat-option") == 0)
		{
			const AVOption *of;
			const char *opt;
			const char *arg;
			if ((argc - i) <= 1)
				usage(argv, "could not parse --avformat-option parameter");
			i++;
			opt = argv[i];
			if ((argc - i) <= 1)
				usage(argv, "could not parse --avformat-option parameter");
			i++;
			arg = argv[i];

			if ((of = av_opt_find(&fc, opt, NULL, 0,
					AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)))
				av_dict_set(&format_opts, opt, arg,
						(of->type == AV_OPT_TYPE_FLAGS) ? AV_DICT_APPEND : 0);
			else
				usage3(argv, "unknown --avformat-option parameter: ", opt);
		}
		else if (strcmp(argv[i], "--loglevel") == 0)
		{
			const char *arg;
			if ((argc - i) <= 1)
				usage(argv, "could not parse --loglevel parameter");
			i++;
			arg = argv[i];

			if (loglevel(arg))
				usage3(argv, "unknown --loglevel parameter: ", arg);
		}
		else if (strcmp(argv[i], "-k") == 0)
		{ //add by wanggm for save key frame information into json file.
			if ((argc - i) <= 1)
				usage(argv, "could not parse -k parameter");
			i++;
			if_save_keyframe = atoi(argv[i]);
		}
		else if( strcmp(argv[i], "-s") == 0)
		{//add by wanggm for set the start index of ts file.
			if ( (argc -i ) <= 1)
				usage(argv, "could not parse -s parmeter");
			i++;
			
			char *output_index_check = NULL;
			output_index  = strtol(argv[i], &output_index_check, 10);
			if ( output_index_check== argv[i] || output_index < 0
					|| output_index >= INT_MAX)
			{
				usage3(argv, "invalid start index of ts file: ", argv[i]);
			}
			
		}
		else if( strcmp(argv[i], "-m") == 0)
		{ // add by wanggm for exit by monitor the process of which pid is given. 
			if ((argc - i) <= 1)
				usage(argv, "could not parse -m parmeter");
			i++;

			if_monitor_related_process = 1;
			unsigned int tmpPid= atoi(argv[i]);
			if( tmpPid > 0)
			{  
				relatedProcessPid = (pid_t) tmpPid;
				fprintf(stdout, "%s I will exit when the process PID= %d exit.\n", getSystemTime(timeChar), relatedProcessPid);
			}
		}
	}

	
	if (!input)
	{
		usage(argv, "-i input file parameter must be specified");
	}

	if (!playlist_filename)
	{
		usage(argv, "-x m3u8 playlist file parameter must be specified");
	}

	if (target_segment_duration == 0.0)
	{
		usage(argv, "-d segment duration parameter must be specified");
	}

	if( output_index <= 0 )
	{
		output_index = 1;	
	}

	if( 1 == if_monitor_related_process)
	{
		pthread_t id;
		pthread_create(&id, NULL, (void*)monitor_process, relatedProcessPid);
	}


	// Create PID file
	if (pid_filename)
	{
		FILE* pid_file = fopen_utf8(pid_filename, "wb");
		if (pid_file)
		{
			fprintf(pid_file, "%d", getpid());
			fclose(pid_file);
		}
	}


	av_register_all();
	avformat_network_init();

	if (!strcmp(input, "-"))
	{
		input = "pipe:";
	}

	output_filename = (char*) malloc(
			sizeof(char) * (strlen(output_prefix) + 15));
	//add by wanggm
	if(  if_save_keyframe == 1)
	{ 
		keyframeinfo_filename = (char*) malloc(
			sizeof(char)* (strlen(output_prefix) + 15));
	}
	if (!output_filename || (1 == if_save_keyframe && !keyframeinfo_filename))
	{
		fprintf(stderr, "%s Could not allocate space for output filenames\n", getSystemTime( timeChar));
		goto error;
	}

	playlist = createPlaylist(max_tsfiles, target_segment_duration,
			http_prefix);
	if (!playlist)
	{
		fprintf(stderr,
				"%s Could not allocate space for m3u8 playlist structure\n", getSystemTime( timeChar));
		goto error;
	}

	ret = avformat_open_input(&ic, input, NULL, (format_opts) ? &format_opts : NULL);

	if (ret != 0)
	{
		fprintf(stderr,
				"%sCould not open input file, make sure it is an mpegts or mp4 file: %d\n", getSystemTime(timeChar), ret);
		goto error;
	}
	av_dict_free(&format_opts);

	if (avformat_find_stream_info(ic, NULL) < 0)
	{
		fprintf(stderr, "%s Could not read stream information\n", getSystemTime( timeChar));
		goto error;
	}

#if LIBAVFORMAT_VERSION_MAJOR > 52 || (LIBAVFORMAT_VERSION_MAJOR == 52 && \
                                       LIBAVFORMAT_VERSION_MINOR >= 45)
	ofmt = av_guess_format("mpegts", NULL, NULL);
#else
	ofmt = guess_format("mpegts", NULL, NULL);
#endif

	if (!ofmt)
	{
		fprintf(stderr, "%s Could not find MPEG-TS muxer\n", getSystemTime( timeChar));
		goto error;
	}

	oc = avformat_alloc_context();
	if (!oc)
	{
		fprintf(stderr, "%s Could not allocated output context\n", getSystemTime( timeChar));
		goto error;
	}
	oc->oformat = ofmt;

	video_index = -1;
	audio_index = -1;

	for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++)
	{
		switch (ic->streams[i]->codec->codec_type)
		{
		case AVMEDIA_TYPE_VIDEO:
			video_index = i;
			ic->streams[i]->discard = AVDISCARD_NONE;
			video_st = add_output_stream(oc, ic->streams[i]);
			break;
		case AVMEDIA_TYPE_AUDIO:
			audio_index = i;
			ic->streams[i]->discard = AVDISCARD_NONE;
			audio_st = add_output_stream(oc, ic->streams[i]);
			break;
		default:
			ic->streams[i]->discard = AVDISCARD_ALL;
			break;
		}
	}

	av_dump_format(oc, 0, output_prefix, 1);

	if (video_index >= 0)
	{
		codec = avcodec_find_decoder(video_st->codec->codec_id);
		if (!codec)
		{
			fprintf(stderr,
					"%s Could not find video decoder, key frames will not be honored\n", getSystemTime( timeChar));
		}

		if (avcodec_open2(video_st->codec, codec, NULL) < 0)
		{
			fprintf(stderr,
					"%s Could not open video decoder, key frames will not be honored\n", getSystemTime( timeChar));
		}
	}

	snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts",
			output_prefix, output_index);

	if( 1 == if_save_keyframe)
	{ 
		snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
			"%s-%u.idx", output_prefix, output_index);
		obj = json_object_new_object();
		info_arr_obj = create_json_header(obj);
	}

	if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0)
	{
		fprintf(stderr, "%s Could not open '%s'\n", getSystemTime( timeChar),output_filename);
		goto error;
	}

	if (avformat_write_header(oc, NULL))
	{
		fprintf(stderr, "%s Could not write mpegts header to first output file\n", getSystemTime( timeChar));
		goto error;
	}

	prev_segment_time = (double) (ic->start_time) / (double) (AV_TIME_BASE);

	streamLace = createStreamLace(ic->nb_streams);

	// add by houmr
	int continue_error_cnt = 0;
	//int pushcnt = 0;
	//int popcnt = 0;
	int tscnt = 0;
	int audiopktcnt = 0;
	int videopktcnt = 0;
	int kfcnt = 0;
	int errpktcnt = 0;
	/////////////////////////
	do
	{
		double segment_time = 0.0;
		AVPacket packet;
		double packetStartTime = 0.0;
		double packetDuration = 0.0;

		if (!decode_done)
		{
			//fprintf(stdout, "%s av_read_frame() begin.\n", getSystemTime( timeChar));
			decode_done = av_read_frame(ic, &packet);
			//fprintf(stdout, "%s av_read_frame() end. packet.size=%d stream_index=%d duration=%d\n", getSystemTime( timeChar), packet.size, packet.stream_index, packet.duration);
			//fprintf(stdout, "%s decode_done=%d\n", getSystemTime( timeChar),decode_done);
			if (!decode_done)
			{
				if (packet.stream_index != video_index
						&& packet.stream_index != audio_index)
				{
					if( ++errpktcnt >= 10)
					{
						decode_done = 1;	
					}
					fprintf(stderr, "%s packet is not video or audio, packet.stream_index=%d\n", getSystemTime( timeChar), packet.stream_index);
					av_free_packet(&packet);
					continue;
				}
				errpktcnt = 0;

				/*printf("orgin : index - %d\t pts = %s\t duration=%d\n", packet.stream_index,
				 av_ts2str(packet.pts), packet.duration);*/
				// add by houmr
				/*if (adjust_pts(&packet, video_index, audio_index) < 0)
				{
					av_free_packet(&packet);
					continue;
				}
				*/
				/////////////////////////////////////
				// Convert the packet PTS from its stream's time_base into seconds.
				double timeStamp =
						(double) (packet.pts)
								* (double) (ic->streams[packet.stream_index]->time_base.num)
								/ (double) (ic->streams[packet.stream_index]->time_base.den);

				// Duplicate the packet so it owns its data and can outlive the
				// demuxer's internal buffers while sitting in the queue.
				if (av_dup_packet(&packet) < 0)
				{
					fprintf(stderr, "%s Could not duplicate packet\n" ,getSystemTime( timeChar));
					av_free_packet(&packet);
					break;
				}
				
				/*
				for(int i = 0; i < streamLace->numStreams; ++i)
				{ 
						fprintf(stdout, "streamLace[%d].size=%d\t", i, streamLace->streams[i]->size);
				}
				fprintf(stdout, "\n");
				*/
				// Buffer the packet in the lace — presumably ordered by timeStamp
				// across streams (TODO confirm insertPacket's ordering contract).
				insertPacket(streamLace, &packet, timeStamp);
			}
		}

		if (countPackets(streamLace) < 50 && !decode_done)
		{
			/* allow the queue to fill up so that the packets can be sorted properly */
			continue;
		}

		// Pull the next packet (in output order) from the lace.
		if (!removePacket(streamLace, &packet))
		{
			fprintf(stdout, "%s get packet failed!!\n", getSystemTime( timeChar));
			if (decode_done)
			{
				/* the queue is empty, we are done */
				break;
			}

			// NOTE(review): decode_done is necessarily false here (the true case
			// broke out above), so this assert always fires in debug builds;
			// in NDEBUG builds it is a no-op and we simply retry. Confirm intent.
			assert(decode_done);
			continue;
		}

		//fprintf(stdout, "%s get 1 packet success. packet info: pts=%ld, dts=%ld\n", getSystemTime( timeChar), packet.pts, packet.dts);
		// Start time and duration of this packet, in seconds.
		packetStartTime = (double) (packet.pts)
				* (double) (ic->streams[packet.stream_index]->time_base.num)
				/ (double) (ic->streams[packet.stream_index]->time_base.den);

		packetDuration = (double) (packet.duration)
				* (double) (ic->streams[packet.stream_index]->time_base.num)
				/ (double) (ic->streams[packet.stream_index]->time_base.den);

#if !defined(NDEBUG) && (defined(DEBUG) || defined(_DEBUG))
		if (av_log_get_level() >= AV_LOG_VERBOSE)
		fprintf(stderr,
				"%s stream %i, packet [%f, %f)\n",
				getSystemTime( timeChar),
				packet.stream_index,
				packetStartTime,
				packetStartTime + packetDuration);
#endif

		// Length the current segment would have if it ended after this packet.
		segment_duration = packetStartTime + packetDuration - prev_segment_time;

		// NOTE: segments are supposed to start on a keyframe.
		// If the keyframe interval and segment duration do not match
		// forcing the segment creation for "better seeking behavior"
		// will result in decoding artifacts after seeking or stream switching.
		if (packet.stream_index == video_index
				&& (packet.flags & AV_PKT_FLAG_KEY || strict_segment_duration))
		{
			//This is video packet and ( packet is key frame or strict time is needed )
			segment_time = packetStartTime;
		}
		else if (video_index < 0)
		{
			//This stream doesn't contain video stream
			segment_time = packetStartTime;
		}
		else
		{
			//This is a video packet or a video packet but not key frame 
			segment_time = prev_segment_time;
		}

		//fprintf(stdout, "%s extra_duration_needed=%f\n", getSystemTime( timeChar), extra_duration_needed);
		// Roll to a new .ts segment once the elapsed time (within tolerance)
		// exceeds the target plus any duration debt carried from prior segments.
		if (segment_time - prev_segment_time + segment_duration_error_tolerance
				> target_segment_duration + extra_duration_needed)
		{
			fprintf(stdout, "%s segment_time=%lf prev_segment_time=%lf  > target_segment_duration=%lf  extra_duration_needed=%lf\n", getSystemTime( timeChar), segment_time, prev_segment_time,  target_segment_duration, extra_duration_needed);
			fprintf(stdout, "%s File %s contains %d PES packet, of which %d are audio packet, %d are video packet within %d key frame.\n", getSystemTime( timeChar), output_filename, tscnt, audiopktcnt, videopktcnt, kfcnt);
			fflush(stdout);
			/*
			for(int i = 0; i < streamLace->numStreams; ++i)
			{
					fprintf(stdout, "%s streamLace[%d].size=%d\t", getSystemTime( timeChar), i, streamLace->streams[i]->size);
			}
			*/
			// Reset per-segment packet counters and flush/close the finished segment.
			tscnt = audiopktcnt = videopktcnt = kfcnt = 0;
			avio_flush(oc->pb);
			avio_close(oc->pb);

			// Keep track of accumulated rounding error to account for it in later chunks.
		/*
			double segment_duration = segment_time - prev_segment_time;
			int rounded_segment_duration = (int) (segment_duration + 0.5);
			extra_duration_needed += (double) rounded_segment_duration
					- segment_duration;
		*/
			double seg_dur = segment_time - prev_segment_time;
			int rounded_segment_duration = (int) (seg_dur + 0.5);
			extra_duration_needed += (target_segment_duration - seg_dur - segment_duration_error_tolerance);
			//fprintf(stdout, "%s ________extra_duration_needed = %lf\n", getSystemTime( timeChar), extra_duration_needed);
			

			updatePlaylist(playlist, playlist_filename, output_filename,
					output_index, rounded_segment_duration);

			
			// NOTE(review): the size passed to snprintf is strlen(output_prefix)+15,
			// not the capacity of output_filename — verify the destination buffer is
			// at least that large and that the "-%u.ts" suffix never exceeds 14 chars.
			snprintf(output_filename, strlen(output_prefix) + 15, "%s-%u.ts",
					output_prefix, ++output_index);
			//add by wanggm
			//Save the all the keyframe information into json file
			if( 1 == if_save_keyframe && NULL != obj)
			{ 
				save_json_to_file(keyframeinfo_filename, obj);
				obj = info_arr_obj = NULL;

				snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
					"%s-%u.idx", output_prefix, output_index);
			}


			// Open the next segment file for writing.
			if (avio_open(&oc->pb, output_filename, AVIO_FLAG_WRITE) < 0)
			{
				fprintf(stderr, "%s Could not open '%s'\n", getSystemTime( timeChar), output_filename);
				break;
			}

			// close when we find the 'kill' file
			if (kill_file)
			{
				FILE* fp = fopen("kill", "rb");
				if (fp)
				{
					fprintf(stderr, "%s user abort: found kill file\n", getSystemTime( timeChar));
					fclose(fp);
					remove("kill");
					decode_done = 1;
					removeAllPackets(streamLace);
				}
			}
			prev_segment_time = segment_time;
		}

		//add by wanggm.
		// Per-segment statistics: total PES packets, audio/video split, keyframes.
		++tscnt;
		if( video_index == packet.stream_index)
		{
			++videopktcnt;	
			// NOTE(review): compares the whole flags word to 1 instead of testing
			// the AV_PKT_FLAG_KEY bit — correct only while no other flag is set.
			if(1 == packet.flags)
			{ 
				++kfcnt;	
				if( 1 == if_save_keyframe)
				{
					//If it is key frame, it's information should be saved.
					//fprintf(stdout, "%s packet is keyframe, packet.pts=%ld\n", getSystemTime( timeChar), packet.pts);
					snprintf(keyframeinfo_filename, strlen(output_prefix) + 15,
					"%s-%u.idx", output_prefix, output_index);
					if (NULL == obj && NULL == info_arr_obj)
					{ 
						obj = json_object_new_object();
						info_arr_obj = create_json_header(obj);
					}
					avio_flush(oc->pb);		//flush the previous data into ts file.
					int64_t offset = avio_tell(oc->pb);	//Get the offset of this key frame in the file.
					save_keyframe_info(info_arr_obj, offset, packet.pts);
					//fprintf(stdout, "%s Keyframe.pos=%ld \tkeyframe.pts=%ld\n", getSystemTime( timeChar), offset, (long)packet.pts);
				}
			}
		}else if( audio_index == packet.stream_index)
		{
			++audiopktcnt;	
		}
		//fprintf(stdout, "%s packet is not keyframe.\n", getSystemTime( timeChar));

		// Hand the packet to the muxer; every path below frees the packet.
		ret = av_interleaved_write_frame(oc, &packet);

		if (ret < 0)
		{
			fprintf(stderr, "%s Warning: Could not write frame of stream\n", getSystemTime( timeChar));
			// add by houmr
			// Tolerate sporadic write failures; give up after 10 in a row.
			continue_error_cnt++;
			if (continue_error_cnt > 10)
			{
				av_free_packet(&packet);
				break;
			}
		}
		else if (ret > 0)
		{
			fprintf(stderr, "%s End of stream requested\n", getSystemTime( timeChar));
			av_free_packet(&packet);
			break;
		}
		else
		{
			// add by houmr error
			// Successful write resets the consecutive-error counter.
			continue_error_cnt = 0;
			////////////////////////
		}
		av_free_packet(&packet);
	} while (!decode_done || countPackets(streamLace) > 0);

	// Input drained: finalize the container, then release codecs and streams.
	av_write_trailer(oc);

	if (video_index >= 0)
	{
		avcodec_close(video_st->codec);
	}

	for (i = 0; i < oc->nb_streams; i++)
	{
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}

	avio_close(oc->pb);
	av_free(oc);

	// Record the final (possibly short) segment and close out the playlist.
	updatePlaylist(playlist, playlist_filename, output_filename, output_index,
			segment_duration);
	closePlaylist(playlist);
	releasePlaylist(&playlist);

	//add by wanggm
	// Persist any keyframe index still pending for the last segment.
	if( 1 == if_save_keyframe && obj != NULL)
	{ 
		save_json_to_file(keyframeinfo_filename, obj);
	}

	if (pid_filename)
	{
		remove(pid_filename);
	}
	
	fflush(stdout);
	fflush(stderr);

	return 0;

	// Error path: shares the pid-file cleanup but reports failure to the caller.
	error: if (pid_filename)
	{
		remove(pid_filename);
	}

	return 1;

}