Example 1
template <typename T>
bool vl::LoggerT<T>::add_stream(const std::string& filename, LogLevel reporting_level)
{
    assert(!filename.empty() && reporting_level != nologging);

    if (!filename.empty())
    {
        std::ofstream* file = new std::ofstream(filename.c_str(), std::ios_base::app);

        if (file && *file && file->is_open())
        {
            return add_stream(file, reporting_level);
        }
        else
        {
            if (file)
                delete file;

            return false;
        }
    }
    else
    {
        return add_stream(nullptr, reporting_level);
    }
}
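Note: the filename overload above delegates to a stream-pointer overload that is not shown here. A minimal sketch of what that overload could look like, assuming a hypothetical m_streams container (the real vl::LoggerT may differ):

template <typename T>
bool vl::LoggerT<T>::add_stream(std::ostream* stream, LogLevel reporting_level)
{
    // A null stream is legal: the filename overload passes nullptr when the
    // file name is empty, registering only the reporting level.
    m_streams.emplace_back(stream, reporting_level); // hypothetical member
    return true;
}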
Example 2
device_status mock_depth_sensor::on_initialize()
{
    auto depth = std::make_shared<mock_depth_stream>();
    add_stream(depth);

    auto ir = std::make_shared<mock_ir_stream>();
    add_stream(ir);

    return device_status_value::ok;
}
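Note: the mock sensors in Examples 2 and 5 rely on an add_stream() registration helper inherited from their sensor base class, which is not shown. A minimal sketch under that assumption (class and member names hypothetical):

void sensor_base::add_stream(std::shared_ptr<stream> s)
{
    // Keep the stream alive and discoverable for the rest of the device's life.
    streams_.push_back(std::move(s));
}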
Example 3
void CVideoLivRecord::InitRecoder(LPCSTR lpFileName,LONG lWidth,LONG lHeight,INT iKeyFrameInterval,int iOnlyVideo)
{
	m_Width = lWidth;
	m_Height = lHeight;
	m_videoduriation = iKeyFrameInterval;
	m_audioduriation = iKeyFrameInterval;

	int ret = 0;
	char filename[MAX_PATH] = {0};
	strncpy(filename, lpFileName, MAX_PATH - 1); // bounded copy, avoids overflowing filename
// 	strcat(filename, ".");
// 	strcat(filename, FILE_SUFFIX);

	avformat_alloc_output_context2(&m_pAVFormatContext, NULL, NULL, filename);
	if (!m_pAVFormatContext){
		log("[CVideoLivRecord::InitRecoder] -- avformat_alloc_output_context2() error");
		return ;
	}
	//video
	if (m_pAVFormatContext->oformat->video_codec != AV_CODEC_ID_NONE){
		add_stream(&m_pVideoStream, &m_pVideoCodec, m_pAVFormatContext->oformat->video_codec);
		m_bHasVideo = TRUE;
		m_bEncodeVideo = TRUE;
	}
	//audio
	if (iOnlyVideo == 0 && m_pAVFormatContext->oformat->audio_codec != AV_CODEC_ID_NONE){
		add_stream(&m_pAudioStream, &m_pAudioCodec, m_pAVFormatContext->oformat->audio_codec);
		m_bHasAudio = TRUE;
		m_bEncodeAudio = TRUE;
	}
	if (m_bHasVideo){
		open_video(m_pVideoStream, m_pVideoCodec, m_pOpt);
	}
	if (m_bHasAudio){
		open_audio(m_pAudioStream, m_pAudioCodec, m_pOpt);
	}

	if (!(m_pAVFormatContext->oformat->flags & AVFMT_NOFILE)){
		ret = avio_open(&m_pAVFormatContext->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0){
			log("[CVideoLivRecord::InitRecoder] -- avio_open() error");
			return ;
		}
	}

	ret = avformat_write_header(m_pAVFormatContext, &m_pOpt);
	if (ret < 0){
		log("[CVideoLivRecord::InitRecoder] -- avformat_write_header() error");
		return ;
	}
}
Example 4
    astra_status_t device_streamset::open_sensor_streams()
    {
        PROFILE_FUNC();

        bool enableColor = true;
        if (enableColor && oniDevice_.hasSensor(openni::SENSOR_COLOR))
        {
            ColorStream* stream = astra::plugins::make_stream<ColorStream>(pluginService_,
                                                  streamSetHandle_,
                                                  oniDevice_, *this);

            astra_status_t rc = ASTRA_STATUS_SUCCESS;
            rc = stream->open();
            add_stream(stream);

            if ( rc != ASTRA_STATUS_SUCCESS)
                LOG_WARN("orbbec.ni.device_streamset", "unable to open openni color stream.");
        }

        if (oniDevice_.hasSensor(openni::SENSOR_DEPTH))
        {
            depthstream* stream = astra::plugins::make_stream<depthstream>(pluginService_,
                                                                           streamSetHandle_,
                                                                           oniDevice_, *this);

            astra_status_t rc = ASTRA_STATUS_SUCCESS;
            rc = stream->open();
            add_stream(stream);

            if (rc != ASTRA_STATUS_SUCCESS)
                LOG_WARN("orbbec.ni.device_streamset", "unable to open openni depth stream.");
        }

        if (oniDevice_.hasSensor(openni::SENSOR_IR))
        {
            infrared_stream* stream = astra::plugins::make_stream<infrared_stream>(pluginService_,
                                                                                   streamSetHandle_,
                                                                                   oniDevice_,
                                                                                   *this);

            astra_status_t rc = ASTRA_STATUS_SUCCESS;
            rc = stream->open();
            add_stream(stream);

            if (rc != ASTRA_STATUS_SUCCESS)
                LOG_WARN("orbbec.ni.device_streamset", "unable to open openni infrared stream.");
        }

        return ASTRA_STATUS_SUCCESS;
    }
Example 5
device_status mock_color_sensor::on_initialize()
{
    auto color = std::make_shared<mock_color_stream>();
    add_stream(color);

    return device_status_value::ok;
}
Example 6
void init()
{	
	boost::log::add_common_attributes<wchar_t>();
	typedef boost::log::aux::add_common_attributes_constants<wchar_t> traits_t;

	typedef boost::log::sinks::synchronous_sink<boost::log::sinks::wtext_file_backend> file_sink_type;

	typedef boost::log::sinks::asynchronous_sink<boost::log::sinks::wtext_ostream_backend> stream_sink_type;

	auto stream_backend = boost::make_shared<boost::log::sinks::wtext_ostream_backend>();
	stream_backend->add_stream(boost::shared_ptr<std::wostream>(&std::wcout, boost::log::empty_deleter()));
	stream_backend->auto_flush(true);

	auto stream_sink = boost::make_shared<stream_sink_type>(stream_backend);
	
//#ifdef NDEBUG
//	stream_sink->set_filter(boost::log::filters::attr<severity_level>(boost::log::sources::aux::severity_attribute_name<wchar_t>::get()) >= debug);
//#else
//	stream_sink->set_filter(boost::log::filters::attr<severity_level>(boost::log::sources::aux::severity_attribute_name<wchar_t>::get()) >= debug);
//#endif

	stream_sink->locked_backend()->set_formatter(&my_formatter);

	boost::log::wcore::get()->add_sink(stream_sink);
}
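Note: boost::log::empty_deleter was later replaced; current Boost spells the no-op deleter boost::null_deleter (compare Example 18). The pattern in all of these Boost.Log snippets is the same: hand the backend a stream it must not own, wrapped in a shared_ptr whose deleter does nothing:

#include <boost/core/null_deleter.hpp>

// Attach std::wcout without transferring ownership; destroying the sink
// leaves the global stream untouched.
stream_backend->add_stream(
    boost::shared_ptr<std::wostream>(&std::wcout, boost::null_deleter()));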
Example 7
int add_stream_from_pcap(char *file_path)
{
    pcap_t *fp;
    char errbuf[PCAP_ERRBUF_SIZE];
    char source[PCAP_BUF_SIZE];
    struct pcap_pkthdr *header;
    const u_char *pkt_data;
    t_stream t_stream_tmp;
    
    /* Create the source string according to the new WinPcap syntax */
    if ( pcap_createsrcstr( source,         // variable that will keep the source string
                            PCAP_SRC_FILE,  // we want to open a file
                            NULL,           // remote host
                            NULL,           // port on the remote host
                            file_path,        // name of the file we want to open
                            errbuf          // error buffer
                            ) != 0)
    {
        WinPrintf(hwnd_frame, "Error creating a source string");
        return -1;
    }
    
    /* Open the capture file */
    if ( (fp= pcap_open(source,         // name of the device
                        65536,          // portion of the packet to capture
                                        // 65536 guarantees that the whole packet will be captured on all the link layers
                         PCAP_OPENFLAG_PROMISCUOUS,     // promiscuous mode
                         1000,              // read timeout
                         NULL,              // authentication on the remote machine
                         errbuf         // error buffer
                         ) ) == NULL)
    {
        WinPrintf(hwnd_frame, "打开文件失败:\n%s\n可能是抓包存档文件损坏或格式不支持", source);
        return -1;
    }

    while (pcap_next_ex(fp, &header, &pkt_data)>0)
    {
        if (nr_cur_stream>=MAX_STREAM_NUM)
        {
             err_msg_box("已达最大流数目 %d", MAX_STREAM_NUM);
             break;
        }

        init_stream(&t_stream_tmp);
        t_stream_tmp.len = header->caplen;
        memcpy(t_stream_tmp.data, pkt_data, t_stream_tmp.len);
        t_stream_tmp.err_flags = build_err_flags((void *)(t_stream_tmp.data), t_stream_tmp.len);
        add_stream(&t_stream_tmp);
    }

    pcap_close(fp);	
    re_populate_items();
    return 0;
}
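Note: Examples 7 and 15 copy a temporary t_stream into a global table guarded by nr_cur_stream and MAX_STREAM_NUM (the same g_apt_streams table Example 30 indexes). A minimal sketch of the add_stream() helper under those assumptions:

static int add_stream(t_stream *pt)
{
    if (nr_cur_stream >= MAX_STREAM_NUM)
        return -1;                                /* table full; callers warn first */
    cpy_stream(g_apt_streams[nr_cur_stream], pt); /* deep-copy into the next slot */
    nr_cur_stream++;
    return 0;
}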
Example 8
void 
Network::accept_stream(void) 
{
   NetStream *s = wait_for_connect();
   cerr << "Network   accept_stream from ---->" << s->name() << endl;
   add_stream(s);
   add_client(s);

   // notify observers
   notify_net(Network_obs::accept_str, s);
}
Example 9
int Add_audio_video_streams(OutputStream *video_st, OutputStream *audio_st, AVFormatContext *oc, AVOutputFormat *fmt, AVCodec *audio_codec, AVCodec *video_codec, IOParam &io)
{
	int ret = 0;
	if (fmt->video_codec != AV_CODEC_ID_NONE)
	{
		add_stream(video_st, oc, &video_codec, fmt->video_codec);
		video_st->st->codec->width = io.frame_width;
		video_st->st->codec->height = io.frame_height;
		ret |= HAVE_VIDEO;
		ret |= ENCODE_VIDEO;
	}
	if (fmt->audio_codec != AV_CODEC_ID_NONE)
	{
		add_stream(audio_st, oc, &audio_codec, fmt->audio_codec);
		ret |= HAVE_AUDIO;
		ret |= ENCODE_AUDIO;
	}

	return ret;
}
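Note: this example and the later FFmpeg ones (Examples 23, 24, 26-28) assume the add_stream() helper from FFmpeg's muxing API example; its exact signature varies with the FFmpeg version. A condensed sketch of the pre-4.0 variant used above:

static void add_stream(OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec, enum AVCodecID codec_id)
{
    /* find the encoder for the requested codec id */
    *codec = avcodec_find_encoder(codec_id);
    if (!*codec) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }

    ost->st = avformat_new_stream(oc, *codec);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams - 1;

    /* per-codec-type setup (bit rate, sample rate, resolution, time base)
     * follows in the full example and is omitted here */
}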
Example 10
static http2_stream_data *
create_http2_stream_data(http2_session_data *session_data, int32_t stream_id) {
  http2_stream_data *stream_data;
  stream_data = malloc(sizeof(http2_stream_data));
  if (!stream_data) {
    return NULL; /* out of memory */
  }
  memset(stream_data, 0, sizeof(http2_stream_data));
  stream_data->stream_id = stream_id;
  stream_data->fd = -1;

  add_stream(session_data, stream_data);
  return stream_data;
}
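Note: in the nghttp2 libevent-server example this snippet comes from, add_stream() links the new stream into an intrusive doubly-linked list rooted in the session data, roughly:

static void add_stream(http2_session_data *session_data,
                       http2_stream_data *stream_data) {
  stream_data->next = session_data->root.next;
  session_data->root.next = stream_data;
  stream_data->prev = &session_data->root;
  if (stream_data->next) {
    stream_data->next->prev = stream_data;
  }
}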
Example 11
void
Network::connect_to(
   NetStream *s
   )
{
   if (s && s->fd() != -1) {
      add_stream(s); 
      if (Config::get_var_bool("PRINT_ERRS",false,true))
          cerr << "Network::connect_to - sending identity to server" << endl;
      *s << NETidentify << port_ << NETflush;
  }
}
Example 12
void
LoggerFactory::setDestinationImpl(std::ostream& os)
{
  std::lock_guard<std::mutex> lock(m_mutex);

  auto backend = boost::make_shared<boost::log::sinks::text_ostream_backend>();
  backend->auto_flush(true);
  backend->add_stream(boost::shared_ptr<std::ostream>(&os, bind([]{})));

  boost::log::core::get()->remove_sink(m_sink);
  m_sink.reset();

  m_sink = boost::make_shared<Sink>(backend);
  m_sink->set_formatter(boost::log::expressions::stream << boost::log::expressions::message);
  boost::log::core::get()->add_sink(m_sink);
}
Example 13
LoggerFactory::LoggerFactory()
  : m_defaultLevel(LOG_INFO)
{
  m_levelNames["NONE"] = LOG_NONE;
  m_levelNames["ERROR"] = LOG_ERROR;
  m_levelNames["WARN"] = LOG_WARN;
  m_levelNames["INFO"] = LOG_INFO;
  m_levelNames["DEBUG"] = LOG_DEBUG;
  m_levelNames["TRACE"] = LOG_TRACE;
  m_levelNames["ALL"] = LOG_ALL;

  auto backend = boost::make_shared<boost::log::sinks::text_ostream_backend>();
  backend->auto_flush(true);
  backend->add_stream(boost::shared_ptr<std::ostream>(&std::clog, bind([]{})));

  m_sink = boost::make_shared<Sink>(backend);
  m_sink->set_formatter(boost::log::expressions::stream << boost::log::expressions::message);
  boost::log::core::get()->add_sink(m_sink);
}
Example 14
static int get_rtsp_streams(streams_struct **ppstream_head)
{
	int ret;
	int i = 0;
	char buf[PATH_MAX];

	dprintf("%s: in\n", __FUNCTION__);

	if ( ppstream_head != NULL && *ppstream_head != NULL )
	{
		interface_clearMenuEntries(_M &rtspStreamMenu); // don't leave thumb pointers to previous streams!
		clean_list(*ppstream_head);
		*ppstream_head = NULL;
	}

	dprintf("%s: call find_streams\n", __FUNCTION__);

	ret = find_streams(ppstream_head);

	if ( !((ret == 0) && (*ppstream_head != NULL)) )
	{
		int file;

		dprintf("%s: open %s\n", __FUNCTION__, appControlInfo.rtspInfo.streamFile);

		file = open(appControlInfo.rtspInfo.streamFile, O_RDONLY);

		if ( file > 0 )
		{
			while ( helperReadLine(file, buf) == 0 )
			{
				if ( strlen(buf) > 0 )
				{
					add_stream(ppstream_head, buf, i);
				}
				i++;
			}
		}
		ret = file;
	}

	return ret;
}
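Note: here add_stream() appends a node to the streams_struct list and, as Example 19 shows, returns the new node so callers can attach metadata. A sketch under those assumptions, with hypothetical field names:

static streams_struct *add_stream(streams_struct **pphead, char *url, int index)
{
    streams_struct *p = (streams_struct *)calloc(1, sizeof(*p));
    if (!p)
        return NULL;
    p->index = index;                 /* hypothetical fields */
    helperSafeStrCpy(&p->url, url);
    while (*pphead)                   /* walk to the tail */
        pphead = &(*pphead)->next;
    *pphead = p;
    return p;
}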
Example 15
void add_stream_from_raw_data(void *buf, int len)
{
    t_stream t_stream_tmp;

    if (nr_cur_stream >= MAX_STREAM_NUM)
    {
        err_msg_box("已达最大流数目 %d", MAX_STREAM_NUM); /* "Maximum stream count %d reached" */
        return;
    }

    init_stream(&t_stream_tmp);
    t_stream_tmp.len = len;
    memcpy(t_stream_tmp.data, buf, len);
    t_stream_tmp.err_flags = build_err_flags((void *)(t_stream_tmp.data), len);
    add_stream(&t_stream_tmp);

    re_populate_items();
}
Example 16
/* Add and configure a stream with a participant and an audio configuration
   into the receiver. */
static void add_receiver_entity() {
    static int count = 1;
    char name[256];
    sprintf(name, "IN_%i", count);
    participant_data_t *p = init_participant(1, INPUT, NULL, 0);
    stream_data_t *s = init_stream(AUDIO, INPUT, rand(), I_AWAIT, 0, name);
    add_participant_stream(s, p);
    ap_config(s->audio, RECEIVER_AUDIO_FORMAT_BPS,
            RECEIVER_AUDIO_FORMAT_SAMPLE_RATE,
            RECEIVER_AUDIO_FORMAT_CHANNELS,
            RECEIVER_AUDIO_FORMAT_CODEC);
    add_stream(receiver->audio_stream_list, s);
    ap_worker_start(s->audio);
    fprintf(stderr, "  ·Adding %s: %i bps, %iHz, %i channel, %s\n",
            name,
            RECEIVER_AUDIO_FORMAT_BPS,
            RECEIVER_AUDIO_FORMAT_SAMPLE_RATE,
            RECEIVER_AUDIO_FORMAT_CHANNELS,
            get_name_to_audio_codec(RECEIVER_AUDIO_FORMAT_CODEC));
    count++;
}
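Note: the audio examples (16, 20, 21, 25) share a stream_list_t container, so add_stream() here presumably appends to that list under its lock. A minimal sketch with hypothetical member names:

int add_stream(stream_list_t *list, stream_data_t *stream)
{
    pthread_rwlock_wrlock(&list->lock);   /* hypothetical rwlock member */
    stream->prev = list->last;
    stream->next = NULL;
    if (list->last)
        list->last->next = stream;
    else
        list->first = stream;
    list->last = stream;
    list->count++;
    pthread_rwlock_unlock(&list->lock);
    return 1;                             /* nonzero on success */
}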
Example 17
void
Logging::setDestinationImpl(shared_ptr<std::ostream> os)
{
  std::lock_guard<std::mutex> lock(m_mutex);

  m_destination = std::move(os);

  auto backend = boost::make_shared<boost::log::sinks::text_ostream_backend>();
  backend->auto_flush(true);
  backend->add_stream(boost::shared_ptr<std::ostream>(m_destination.get(), [] (auto) {}));

  if (m_sink != nullptr) {
    boost::log::core::get()->remove_sink(m_sink);
    m_sink->flush();
    m_sink.reset();
  }

  m_sink = boost::make_shared<Sink>(backend);
  m_sink->set_formatter(boost::log::expressions::stream << boost::log::expressions::message);
  boost::log::core::get()->add_sink(m_sink);
}
Example 18
void add_cout_sink()
{
    boost::log::add_common_attributes();
    // boost::log::core::get()->add_global_attribute("NativeThreadId",
    // boost::log::attributes::make_function(&std::this_thread::get_id));
    boost::log::core::get()->add_global_attribute("TimestampMillis", boost::log::attributes::make_function([] {
                                                      return boost::posix_time::microsec_clock::local_time();
                                                  }));

    using stream_sink_type = sinks::asynchronous_sink<sinks::wtext_ostream_backend>;

    auto stream_backend = boost::make_shared<boost::log::sinks::wtext_ostream_backend>();
    stream_backend->add_stream(boost::shared_ptr<std::wostream>(&std::wcout, boost::null_deleter()));
    stream_backend->auto_flush(true);

    auto stream_sink = boost::make_shared<stream_sink_type>(stream_backend);

    stream_sink->set_formatter(boost::bind(&my_formatter<boost::log::wformatting_ostream>, false, _1, _2));

    logging::core::get()->add_sink(stream_sink);
}
Example 19
static void rtsp_urlHandler(void *pArg, const char *location, const char *desc, xmlConfigHandle_t track)
{
	int            *pIndex = (int*)pArg;
	streams_struct *pstream;
	char           *ptr;

	if( location[0] )
	{
		pstream = add_stream(ppstream_head, (char *)location, *pIndex);
		if( pstream == NULL )
			return;

		helperSafeStrCpy(&pstream->name, desc);

		if( track )
		{
			ptr = (char*)xmlConfigGetText(track, "descr");
			if( ptr && *ptr )
			{
				//dprintf("%s: Adding '%s' description to %03d stream\n", __FUNCTION__, ptr, *pIndex);
				helperSafeStrCpy( &pstream->description, ptr );
			}

			ptr = (char*)xmlConfigGetText(track, "thumb");
			if( ptr && *ptr )
			{
				//dprintf("%s: Adding '%s' thumb to %03d stream\n", __FUNCTION__, ptr, *pIndex);
				helperSafeStrCpy( &pstream->thumb, ptr );
			}

			ptr = (char*)xmlConfigGetText(track, "poster");
			if( ptr && *ptr )
			{
				//dprintf("%s: Adding '%s' poster to %03d stream\n", __FUNCTION__, ptr, *pIndex);
				helperSafeStrCpy( &pstream->poster, ptr );
			}
		}

		(*pIndex)++;
	}
}
Example 20
/* Add and configure a stream with a participant and an audio configuration
   into the transmitter. */
static void add_transmitter_entity(char *ip, int port) {
    static int count = 1;
    char name[256];
    sprintf(name, "OUT_%i", count);
    participant_data_t *p = init_participant(0, OUTPUT, ip, port);
    stream_data_t *s = init_stream(AUDIO, OUTPUT, 0, ACTIVE, 0, name);
    add_participant_stream(s, p);
    ap_config(s->audio, TRANSMITTER_AUDIO_FORMAT_BPS,
            TRANSMITTER_AUDIO_FORMAT_SAMPLE_RATE,
            TRANSMITTER_AUDIO_FORMAT_CHANNELS,
            TRANSMITTER_AUDIO_FORMAT_CODEC);
    add_stream(transmitter->audio_stream_list, s);
    ap_worker_start(s->audio);
    fprintf(stderr,
            "  ·Adding %s: %i bps, %iHz, %i channel, %s, to addr %s:%i\n",
            name,
            TRANSMITTER_AUDIO_FORMAT_BPS,
            TRANSMITTER_AUDIO_FORMAT_SAMPLE_RATE,
            TRANSMITTER_AUDIO_FORMAT_CHANNELS,
            get_name_to_audio_codec(TRANSMITTER_AUDIO_FORMAT_CODEC),
            ip,
            port);
    count++;
}
Example 21
int main(int argc, char **argv)
{
    int stop_at = 0;
    char *yuv_path;
    if (argc == 2) {
        yuv_path = argv[1];
    } else if (argc == 3) {
        yuv_path = argv[1];
        stop_at = atoi(argv[2]);
    } else {
        printf("usage: %s input [max frames]\n", argv[0]);
        return -1;
    }

    printf("[test] initializing streams list\n");
    printf("[test] init_stream_list\n");
    stream_list_t *streams = init_stream_list();
    printf("[test] init_stream\n");
    stream_data_t *stream = init_stream(VIDEO, OUTPUT, 0, ACTIVE, "i2catrocks");
    printf("[test] set_stream_video_data\n");
    printf("[test] add_stream\n");
    add_stream(streams, stream);

    printf("[test] initializing transmitter\n");
    transmitter_t *transmitter = init_transmitter(streams, 20.0);
    start_transmitter(transmitter);

    rtsp_serv_t *server;
    server = malloc(sizeof(rtsp_serv_t));

    server->port = 8554;
    server->streams = streams;
    server->transmitter = transmitter;
    
    init_encoder(stream->video);
    
    c_start_server(server);
    c_update_server(server);
       
    // Stuff ... 
    AVFormatContext *pformat_ctx = avformat_alloc_context();
    AVCodecContext codec_ctx;
    int video_stream = -1;
    av_register_all();

    int width = 1280;
    int height = 534;

    load_video(yuv_path, pformat_ctx, &codec_ctx, &video_stream);

    uint8_t *b1 = (uint8_t *)av_malloc(avpicture_get_size(codec_ctx.pix_fmt,
                        codec_ctx.width, codec_ctx.height)*sizeof(uint8_t));
    
    int counter = 0;

    struct timeval a, b;
    video_data_frame_t *decoded_frame;
    
    while(1) {
    
        gettimeofday(&a, NULL);
        
        int ret = read_frame(pformat_ctx, video_stream, &codec_ctx, b1);
        if (stop_at > 0 && counter == stop_at) {
            break;
        }

        if (ret == 0) {
            counter++;
            
            decoded_frame = curr_in_frame(stream->video->decoded_frames);
            if (decoded_frame == NULL){
                continue;
            }
            
            decoded_frame->buffer_len = vc_get_linesize(width, RGB)*height;
            memcpy(decoded_frame->buffer, b1, decoded_frame->buffer_len); 
            
            put_frame(stream->video->decoded_frames);
        } else {
            break;
        }
        gettimeofday(&b, NULL);
        long diff = (b.tv_sec - a.tv_sec)*1000000 + b.tv_usec - a.tv_usec;

        if (diff < 40000) {
            usleep(40000 - diff);
        } else {
            usleep(0);
        }
    }
    debug_msg(" deallocating resources and terminating threads\n");
    av_free(pformat_ctx);
    av_free(b1);
    debug_msg(" done!\n");

    stop_transmitter(transmitter);

    destroy_stream_list(streams);

    return 0;
}
Example 22
MirWaitHandle* MirSurface::modify(MirSurfaceSpec const& spec)
{
    mp::SurfaceModifications mods;

    {
        std::unique_lock<decltype(mutex)> lock(mutex);
        mods.mutable_surface_id()->set_value(surface->id().value());
    }

    auto const surface_specification = mods.mutable_surface_specification();

    #define COPY_IF_SET(field)\
        if (spec.field.is_set())\
        surface_specification->set_##field(spec.field.value())

    COPY_IF_SET(width);
    COPY_IF_SET(height);
    COPY_IF_SET(pixel_format);
    COPY_IF_SET(buffer_usage);
    // name is a special case (below)
    COPY_IF_SET(output_id);
    COPY_IF_SET(type);
    COPY_IF_SET(state);
    // preferred_orientation is a special case (below)
    // parent_id is a special case (below)
    // aux_rect is a special case (below)
    COPY_IF_SET(edge_attachment);
    COPY_IF_SET(min_width);
    COPY_IF_SET(min_height);
    COPY_IF_SET(max_width);
    COPY_IF_SET(max_height);
    COPY_IF_SET(width_inc);
    COPY_IF_SET(height_inc);
    // min_aspect is a special case (below)
    // max_aspect is a special case (below)
    #undef COPY_IF_SET

    if (spec.surface_name.is_set())
        surface_specification->set_name(spec.surface_name.value());

    if (spec.pref_orientation.is_set())
        surface_specification->set_preferred_orientation(spec.pref_orientation.value());

    if (spec.parent.is_set() && spec.parent.value())
        surface_specification->set_parent_id(spec.parent.value()->id());

    if (spec.parent_id)
    {
        auto id = surface_specification->mutable_parent_persistent_id();
        id->set_value(spec.parent_id->as_string());
    }

    if (spec.aux_rect.is_set())
    {
        auto const rect = surface_specification->mutable_aux_rect();
        auto const& value = spec.aux_rect.value();
        rect->set_left(value.left);
        rect->set_top(value.top);
        rect->set_width(value.width);
        rect->set_height(value.height);
    }

    if (spec.min_aspect.is_set())
    {
        auto const aspect = surface_specification->mutable_min_aspect();
        aspect->set_width(spec.min_aspect.value().width);
        aspect->set_height(spec.min_aspect.value().height);
    }

    if (spec.max_aspect.is_set())
    {
        auto const aspect = surface_specification->mutable_max_aspect();
        aspect->set_width(spec.max_aspect.value().width);
        aspect->set_height(spec.max_aspect.value().height);
    }

    if (spec.streams.is_set())
    {
        auto_resize_stream = false;
        for(auto const& stream : spec.streams.value())
        {
            auto const new_stream = surface_specification->add_stream();
            new_stream->set_displacement_x(stream.displacement_x);
            new_stream->set_displacement_y(stream.displacement_y);
            new_stream->mutable_id()->set_value(
                reinterpret_cast<mcl::ClientBufferStream*>(stream.stream)->rpc_id().as_value());
        }
    }

    if (spec.input_shape.is_set())
    {
        for (auto const& rect : spec.input_shape.value())
        {
            auto const new_shape = surface_specification->add_input_shape();
            new_shape->set_left(rect.left);
            new_shape->set_top(rect.top);
            new_shape->set_width(rect.width);
            new_shape->set_height(rect.height);
        }
    }

    modify_wait_handle.expect_result();
    server->modify_surface(&mods, modify_result.get(),
              google::protobuf::NewCallback(this, &MirSurface::on_modified));

    return &modify_wait_handle;
}
Example 23
/* media file output */
int main(int argc, char **argv)
{
	const char *filename;
	AVOutputFormat *fmt;
	AVFormatContext *oc;
	AVStream *video_st = NULL;
	AVCodec *video_codec = NULL;
	double video_time;
	int flush, ret;

	/* Initialize libavcodec, and register all codecs and formats. */
	av_register_all();

	filename = "E:\\muxing.mp4";
	/* allocate the output media context */
	avformat_alloc_output_context2(&oc, NULL, NULL, filename);

	if (!oc) {
		printf("Could not deduce output format from file extension: using MPEG.\n");
		avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
	}
	if (!oc)
		return 1;


	fmt = oc->oformat; // output format used when muxing

	/* Add the audio and video streams using the default format codecs
	* and initialize the codecs. */

	//fmt->video_codec = AV_CODEC_ID_H264;
	fmt->video_codec = AV_CODEC_ID_MPEG4;

	if (fmt->video_codec != AV_CODEC_ID_NONE)
		video_st = add_stream(oc, &video_codec, fmt->video_codec); // add_stream(AVFormatContext *oc, AVCodec **codec, enum AVCodecID codec_id)
	// helper that sets the codec parameters

	/* Now that all the parameters are set, we can open the audio and
	* video codecs and allocate the necessary encode buffers. */
	if (video_st)
		open_video(oc, video_codec, video_st); // (AVFormatContext *oc, AVCodec *codec, AVStream *st)
	// open the codec and set up frames

	av_dump_format(oc, 0, filename, 1); // debug helper that prints format info


	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {

		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);

		if (ret < 0) {
			char buf[256];
			av_strerror(ret, buf, sizeof(buf));
			fprintf(stderr, "Could not open '%s': %s\n", filename, buf);
			return 1;
		}
	}

	/* Write the stream header, if any. */
	ret = avformat_write_header(oc, NULL); // Allocate the stream private data and write the stream header to an output media file.
	// write the header -> write the body -> finish up

	if (ret < 0) {
		char buf[256];
		av_strerror(ret, buf, sizeof(buf));
		fprintf(stderr, "Error occurred when opening output file: %s\n", buf);
		return 1;
	}

	flush = 0;

	while ((video_st && !video_is_eof)) {

		if (!flush && (!video_st)) {
			flush = 1;
		}
		if (video_st && !video_is_eof) {
			write_video_frame(oc, video_st, flush); // write the body
		}

		if (frame_count == 10000)
			break;
	}

	/* Write the trailer, if any. The trailer must be written before you
	* close the CodecContexts open when you wrote the header; otherwise
	* av_write_trailer() may try to use memory that was freed on
	* av_codec_close(). */

	av_write_trailer(oc);

	/* Close each codec. */
	if (video_st)
		close_video(oc, video_st);

	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);

	/* free the stream */
	avformat_free_context(oc);

	return 0;
}
Example 24
int main(int argc, char **argv)
{
    OutputStream audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec;
    int ret;
    int have_audio = 0;
    int encode_audio = 0;
    AVDictionary *opt = NULL;
    
    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();
    avformat_network_init();
    
    if (argc < 2) {
        printf("usage: %s output_file\n", argv[0]);
        return 1;
    }
    av_dict_set(&opt, "strict", "experimental", 0);
    
    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }
    
    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, "sdp", filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;
    
    fmt = oc->oformat;
    
    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }
    
    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);
    
    av_dump_format(oc, 0, filename, 1);
    
    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }
    
    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }
    
    while (encode_audio) {
        /* select the stream to encode */
            encode_audio = !write_audio_frame(oc, &audio_st);
    }
    
    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);
    
    /* Close each codec. */
    close_stream(oc, &audio_st);
    
    if (!(fmt->flags & AVFMT_NOFILE))
    /* Close the output file. */
        avio_close(oc->pb);
    
    /* free the stream */
    avformat_free_context(oc);
    
    return 0;
}
Example 25
int main() {

    fprintf(stderr, "Starting audio_receiver test...\n");

    // Open files to write audio
    if ((F_audio1 = fopen(name_audio1, "wb")) == NULL) {
        perror(name_audio1);
        exit(errno);
    }
    if ((F_audio2 = fopen(name_audio2, "wb")) == NULL) {
        perror(name_audio2);
        exit(errno);
    }

    // General-purpose variables.
    time_t start, stop;
    audio_frame2 *audio_frame;

    // Receiver configuration
    stream_list_t *video_stream_list = init_stream_list(); // Not used
    stream_list_t *audio_stream_list = init_stream_list();
    receiver_t *receiver = init_receiver(video_stream_list, audio_stream_list, 5004, 5006);

    // First stream and participant configuration
    participant_data_t *p1 = init_participant(1, INPUT, NULL, 0);
    stream_data_t *stream1 = init_stream(AUDIO, INPUT, rand(), I_AWAIT, 25.0, "Stream1");
    add_participant_stream(stream1, p1);
    add_stream(receiver->audio_stream_list, stream1);
    fprintf(stderr, " ·Stream1 configuration: 1 bps, 32000Hz, 1 channel, mulaw\n");
    ap_config(stream1->audio, 1, 32000, 1, AC_MULAW);
    ap_worker_start(stream1->audio);

    // Second stream and participant configuration
    participant_data_t *p2 = init_participant(2, INPUT, NULL, 0);
    stream_data_t *stream2 = init_stream(AUDIO, INPUT, rand(), I_AWAIT, 25.0, "Stream2");
    add_participant_stream(stream2, p2);
    add_stream(receiver->audio_stream_list, stream2);
    fprintf(stderr, " ·Stream2 configuration: 1 bps, 8000Hz, 1 channel, mulaw\n");
    ap_config(stream2->audio, 1, 8000, 1, AC_MULAW);
    ap_worker_start(stream2->audio);

    if (start_receiver(receiver)) {
        fprintf(stderr, " ·Receiver started!\n");

#ifdef STREAM1
        // STREAM1 recording block
        fprintf(stderr, "  ·Waiting for audio_frame2 data\n");
        while (stream1->audio->decoded_cq->level == CIRCULAR_QUEUE_EMPTY) {
#ifdef QUEUE_PRINT
            print_cq_status(stream1->audio->decoded_cq, "wait stream1");
#endif
        }
#ifdef QUEUE_PRINT
        print_cq_status(stream1->audio->decoded_cq, "continue stream1");
#endif

        fprintf(stderr, "   ·Copying to file... ");
        start = time(NULL);
        stop = start + RECORD_TIME;
        while (time(NULL) < stop) { // RECORD_TIME seconds loop
            audio_frame = cq_get_front(stream1->audio->decoded_cq);
            if (audio_frame != NULL) {
                fwrite(audio_frame->data[0], audio_frame->data_len[0], 1, F_audio1);
                cq_remove_bag(stream1->audio->decoded_cq);
            }
        }
        fprintf(stderr, "Done!\n");
#endif //STREAM1

#ifdef STREAM2
        // STREAM2 recording block
        fprintf(stderr, "  ·Waiting for audio_frame2 data\n");
        while (stream2->audio->decoded_cq->level == CIRCULAR_QUEUE_EMPTY) {
#ifdef QUEUE_PRINT
            print_cq_status(stream2->audio->decoded_cq, "wait stream2");
#endif
        }
#ifdef QUEUE_PRINT
        print_cq_status(stream2->audio->decoded_cq, "continue stream2");
#endif
        fprintf(stderr, "   ·Copying to file... ");
        start = time(NULL);
        stop = start + RECORD_TIME;
        while (time(NULL) < stop) { // RECORD_TIME seconds loop
            audio_frame = cq_get_front(stream2->audio->decoded_cq);
            if (audio_frame != NULL) {
                fwrite(audio_frame->data[0], audio_frame->data_len[0], 1, F_audio2);
                cq_remove_bag(stream2->audio->decoded_cq);
            }
        }
        fprintf(stderr, "Done!\n");
#endif //STREAM2

        // Finish and destroy objects
        stop_receiver(receiver);
        destroy_receiver(receiver);
        fprintf(stderr, " ·Receiver stopped\n");
        destroy_stream_list(video_stream_list);
        destroy_stream_list(audio_stream_list);
    }

    if (fclose(F_audio1) != 0) {
        perror(name_audio1);
        exit(-1);
    }
    if (fclose(F_audio2) != 0) {
        perror(name_audio2);
        exit(-1);
    }
    fprintf(stderr, "Finished\n");
}
Example 26
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_time, video_time;
    int flush, ret;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;

    if (fmt->video_codec != AV_CODEC_ID_NONE)
        video_st = add_stream(oc, &video_codec, fmt->video_codec);
    if (fmt->audio_codec != AV_CODEC_ID_NONE)
        audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    flush = 0;
    while ((video_st && !video_is_eof) || (audio_st && !audio_is_eof)) {
        /* Compute current audio and video time. */
        audio_time = (audio_st && !audio_is_eof) ? audio_st->pts.val * av_q2d(audio_st->time_base) : INFINITY;
        video_time = (video_st && !video_is_eof) ? video_st->pts.val * av_q2d(video_st->time_base) : INFINITY;

        if (!flush &&
            (!audio_st || audio_time >= STREAM_DURATION) &&
            (!video_st || video_time >= STREAM_DURATION)) {
            flush = 1;
        }

        /* write interleaved audio and video frames */
        if (audio_st && !audio_is_eof && audio_time <= video_time) {
            write_audio_frame(oc, audio_st, flush);
        } else if (video_st && !video_is_eof && video_time < audio_time) {
            write_video_frame(oc, video_st, flush);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Example 27
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
                (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
                                                audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Example 28
	bool FFMPEGer::init(MetaData* meta){
		if(mInited)
			return true;
		
		do{
			AVDictionary *opt = NULL;
			int ret;
			
			av_register_all();
			
			avformat_alloc_output_context2(&fmt_ctx, NULL, NULL, mOutputFile);
			if(fmt_ctx == NULL){
				ALOGE("fail to avformat_alloc_output_context2 for %s", mOutputFile);
				break;
			}

			fmt = fmt_ctx->oformat;
			
			/* Add the audio and video streams using the default format codecs
			 * and initialize the codecs. */
			if (fmt->video_codec != AV_CODEC_ID_NONE) {
				add_stream(&video_st, fmt_ctx, &video_codec, fmt->video_codec);
				have_video = true;
			}
			if (fmt->audio_codec != AV_CODEC_ID_NONE) {
				add_stream(&audio_st, fmt_ctx, &audio_codec, fmt->audio_codec);
				have_audio = true;
			}
			
			if(!have_audio && !have_video){
				ALOGE("no audio or video codec found for the fmt!");
				break;
			}

			/* Now that all the parameters are set, we can open the audio and
			 * video codecs and allocate the necessary encode buffers. */
			if (have_video)
				open_video(video_codec, &video_st, opt);
			
			if (have_audio)
				open_audio(audio_codec, &audio_st, opt);
			
			/* open the output file, if needed */
			if (!(fmt->flags & AVFMT_NOFILE)) {
				ret = avio_open(&fmt_ctx->pb, mOutputFile, AVIO_FLAG_WRITE);
				if (ret < 0) {
					ALOGE("Could not open '%s': %s", mOutputFile, av_err2str(ret));
					break;
				}
			}

			/* Write the stream header, if any. */
			ret = avformat_write_header(fmt_ctx, NULL);
			if (ret < 0) {
				ALOGE("Error occurred when opening output file: %s", av_err2str(ret));
				break;
			}
			
			mInited = true;
		}while(0);

		if(!mInited)
			reset();

		return mInited;
	}
Example 29
int open_output_file()
{
	AVStream *outStream = NULL;
	AVStream *inStream = NULL;
	AVCodecContext *decCtx = NULL, *encCtx = NULL;
	AVOutputFormat *ofmt = NULL;
	AVCodec *encoder = NULL;
	int ret;
	int streamIdx = 0;
	unsigned int i;

	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, file_out);
	if (!ofmt_ctx)
	{
		av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
		return AVERROR_UNKNOWN;
	}

	ofmt = ofmt_ctx->oformat;
	ofmt->video_codec = CODEC;

	if (ofmt->video_codec != AV_CODEC_ID_NONE)
		outStream = add_stream(inStream, ofmt->video_codec, &encoder);

	if (outStream)
	{
		encCtx = outStream->codec;
		ret = avcodec_open2(encCtx, encoder, NULL);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open video codec\n");
			return ret;

		}
	}

	av_dump_format(ofmt_ctx, 0, file_out, 1);

	if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
	{
		ret = avio_open(&ofmt_ctx->pb, file_out, AVIO_FLAG_WRITE);
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", file_out);
			return ret;
		}
	}

	//initialize muxer, write output file header
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0)
	{
		av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
		return ret;
	}

	ofmt_ctx->streams[streamIdx]->codec->time_base.den = time_base_den;
	ofmt_ctx->streams[streamIdx]->codec->time_base.num = time_base_num;


	return 0;
}
Example 30
LRESULT CALLBACK stream_WndProc (HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
    RECT		rect ;
    int cxClient, cyClient;
    static HMENU	hMenu ;
    POINT point ;
    int ret, idx;
    TCHAR  buf[64];
    char file_name[MAX_FILE_PATH_LEN];

    static int edit_iItem=-1 ;
    static int edit_iSubItem;

    LVITEM lv_item;
    switch (message)
    {
        case WM_CREATE:
            hwnd_stream= hwnd;
            hMenu = LoadMenu (g_hInstance, TEXT("my_popup_menu")) ;
            hMenu = GetSubMenu (hMenu, 0) ;

            hwnd_dynamic_edit=CreateWindow (TEXT("edit"), TEXT(""),
                WS_CHILD|ES_AUTOHSCROLL,
                CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT,
                hwnd, (HMENU)ID_DYNAMIC_EDIT, g_hInstance, NULL) ;

            SendMessage(hwnd_dynamic_edit, WM_SETFONT, (WPARAM)char_font_2, 0); 

            hwnd_lv = CreateListView(hwnd);
            //InitListViewImageLists(hwnd_lv);
            InitListViewColumns(hwnd_lv);
            lv_row_color_init();
            old_lv_proc = (WNDPROC) SetWindowLong (hwnd_lv, 
                                 	GWL_WNDPROC, (LONG)my_lv_proc) ;

            ShowWindow(hwnd_lv, 1) ;
            refresh_window(hwnd_lv) ;

            add_tip(hwndTip, hwnd_lv, TEXT("点击鼠标右键进行操作")); /* "Right-click for options" */

            return 0 ;

        case WM_SIZE:
      		cxClient = LOWORD (lParam) ;
      		cyClient = HIWORD (lParam) ;

            MoveWindow(hwnd_lv, 	0, 0,
                cxClient, cyClient, TRUE) ;

            return 0 ;

        case WM_NOTIFY:
        {
            NMHDR *pt_nmhdr = (void *)lParam;

            switch (LOWORD(wParam))
            {
                case ID_LV:
                    // if code == NM_CLICK - Single click on an item
                    if (pt_nmhdr->code == NM_CLICK || pt_nmhdr->code == NM_DBLCLK)
                    {
                        int iItem = ((LPNMITEMACTIVATE)lParam)->iItem;
                        int iSubItem = ((LPNMITEMACTIVATE)lParam)->iSubItem;

                        if (iItem >= 0 && ((iSubItem >= 3 && iSubItem <= 5) || iSubItem == 7))
                        {
                            ListView_GetSubItemRect(hwnd_lv, iItem, iSubItem, LVIR_LABEL, &rect);
                            ListView_GetItemText(hwnd_lv, iItem, iSubItem, buf, sizeof(buf));
                            MoveWindow(hwnd_dynamic_edit,
                                rect.left, rect.top, rect.right - rect.left, rect.bottom - rect.top, TRUE);
                            SetWindowText(hwnd_dynamic_edit, buf);
                            ShowWindow(hwnd_dynamic_edit, 1);
                            SetFocus(hwnd_dynamic_edit);
                            SendMessage(hwnd_dynamic_edit, EM_SETSEL, (WPARAM)0, (LPARAM)-1);

                            edit_iItem = iItem;
                            edit_iSubItem = iSubItem;
                        }

                        return 0;
                    }
                    else if (pt_nmhdr->code == NM_RCLICK)
                    {
                        point = ((LPNMITEMACTIVATE)lParam)->ptAction;
                        ListView_GetItem(hwnd_lv, &lv_item);

                        ClientToScreen(hwnd_lv, &point);
                        TrackPopupMenu(hMenu, TPM_LEFTBUTTON, point.x, point.y, 0, hwnd_stream, NULL);
                        return 0;
                    }
                    else if (pt_nmhdr->code == LVN_ITEMCHANGED)
                    {
                        if (!init_over) break;
                        if (lv_in_op)   break;
                        //if((((LPNMLISTVIEW)lParam)->uOldState&LVIS_STATEIMAGEMASK) !=
                        //    (((LPNMLISTVIEW)lParam)->uNewState&LVIS_STATEIMAGEMASK))
                        {
                            int iItem = ((LPNMITEMACTIVATE)lParam)->iItem;
                            int selected = ListView_GetCheckState(hwnd_lv, iItem);

                            if (g_apt_streams[iItem]->selected != selected)
                            {
                                doc_modified = 1;
                                g_apt_streams[iItem]->selected = selected;
                                update_statusbar();
                            }
                        }
                        return 0;
                    }
                    break;
            }
            break;
        }

        case 	WM_COMMAND:
            switch (LOWORD(wParam))
            {
                  
                case    IDM_STREAM_NEW:
                    init_stream(&gt_edit_stream);
                    gt_edit_stream.len=gat_sample_pkts[0].len;
                    memcpy(gt_edit_stream.data, gat_sample_pkts[0].pkt_data, gt_edit_stream.len);

                    ret=DialogBox(g_hInstance, TEXT("STREAM_EDIT_DLG"), hwnd, StreamEditDlgProc);
                    if (IDOK==ret)
                    {
                        add_stream(&gt_edit_stream);
                        re_populate_items();
                    }
               	return 0 ;

                case    IDM_STREAM_NEW_HEX:
                {
                    int len;
                    char buf[MAX_PACKET_LEN];
                    if (get_open_file_name(file_name, hwnd, ALL_FILE_FILTER))
                        return 0;

                    len = read_file_to_buf(buf, sizeof(buf)-1, file_name);
                    if (len>0)
                        add_stream_from_hex_text(buf, len);
                    else
                        err_msg_box("读取文件内容失败");
                   	return 0 ;
                }

                case    IDM_STREAM_NEW_BIN:
                {
                    int len;
                    char buf[MAX_PACKET_LEN];
                    if (get_open_file_name(file_name, hwnd, ALL_FILE_FILTER))
                        return 0;

                    len = read_file_to_buf(buf, sizeof(buf)-1, file_name);
                    if (len>0)
                        add_stream_from_raw_data(buf, len);
                    else
                        err_msg_box("读取文件内容失败");
                   	return 0 ;
                }

                case    IDM_STREAM_NEW_PCAP:
                {
                    if (get_open_file_name(file_name, hwnd, PCAP_FILE_FILTER))
                        return 0;
                    add_stream_from_pcap(file_name);
                   	return 0 ;
                }
                                
                case    IDM_STREAM_DEL:
                    idx=GetIndex(hwnd_lv);
       				//ListView_DeleteItem(hwnd_lv, idx);
                    delete_stream(idx);
                    re_populate_items();
                    update_statusbar();
       				return 0 ;
                case    IDM_STREAM_EDIT:
                    cur_strm_idx=GetIndex(hwnd_lv);
                    if (cur_strm_idx<0) return 0;
                    cpy_stream(&gt_edit_stream, g_apt_streams[cur_strm_idx]);
                    ret=DialogBox(g_hInstance, TEXT("STREAM_EDIT_DLG"), hwnd, StreamEditDlgProc);
                    if (IDOK==ret)
                    {
                        doc_modified=1;
                        cpy_stream(g_apt_streams[cur_strm_idx], &gt_edit_stream);
                        re_populate_items();
                    }
       				//ListView_DeleteAllItems(hwnd_lv);
       				return 0 ;
                case    IDM_STREAM_DEL_SEL:
       				//DelSel(hwnd_lv);
                    delete_sel_stream();
                    re_populate_items();
                    update_statusbar();
       				return 0 ;

                case    IDM_STREAM_SEL_ALL:
       				SelAll(hwnd_lv);
       				return 0 ;

                case    IDM_STREAM_SEL_RVS:
       				SelRvs(hwnd_lv);
       				return 0 ;
                    
                case    IDM_STREAM_COPY:
       				copy_idx = GetIndex(hwnd_lv);
       				return 0 ;

                case    IDM_STREAM_PASTE:
       				cpy_stream(&gt_edit_stream, g_apt_streams[copy_idx]);
                    add_stream(&gt_edit_stream);
                    re_populate_items();
       				return 0 ;

                case    IDM_STREAM_MAKE_FRAGS:
                {
       				ret=DialogBox(g_hInstance, TEXT("FRAG_DLG"), hwnd, FragDlgProc);
       				return 0 ;
                }

                case    IDM_STREAM_SEL2PCAP:
                {
                    ret=get_save_file_name(file_name, hwnd, PCAP_FILE_FILTER, PCAP_FILE_SUFFIX);
                    if (ret) return 0 ;

       				stream2dump(file_name);
                    update_pcap_file_history(file_name);
       				return 0 ;
                }

                case    IDM_STREAM_2_BIN:
                {
                    ret=get_save_file_name(file_name, hwnd, BIN_FILE_FILTER, BIN_FILE_SUFFIX);
                    if (ret) return 0 ;

       				stream_2_bin(file_name);
       				return 0 ;
                }

                case    IDM_STREAM_2_TEXT:
                {
                    ret=get_save_file_name(file_name, hwnd, TEXT_FILE_FILTER, TEXT_FILE_SUFFIX);
                    if (ret) return 0 ;

       				stream_2_text(file_name);
       				return 0 ;
                }
                
                case    ID_DYNAMIC_EDIT:
       				if (HIWORD(wParam)==EN_KILLFOCUS)
                    {

            update_grid_from_edit(edit_iItem, edit_iSubItem);
            edit_iItem=-1;

           				return 0 ;
                    }

            }
            break;

case   WM_KEYDOWN:
    if (VK_RETURN==wParam)
    {

        SetFocus(hwnd);

         return 0;

    }
    break;

        case 	WM_INITMENUPOPUP:
        {
            int idx=GetIndex(hwnd_lv);
            t_stream *pt_stream = g_apt_streams[idx];
            int sel_cnt=GetSelCnt(hwnd_lv);
            int item_cnt=ListView_GetItemCount(hwnd_lv);
            if (lParam == 0)
            {
                UINT add_stream_menu_state = nr_cur_stream<MAX_STREAM_NUM ? MF_ENABLED : MF_GRAYED;
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_NEW, add_stream_menu_state);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_NEW_HEX, add_stream_menu_state);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_NEW_BIN, add_stream_menu_state);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_NEW_PCAP, add_stream_menu_state);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_EDIT, idx>=0 ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_DEL, idx>=0 ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_COPY, idx>=0 ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_PASTE, copy_idx>=0 && item_cnt>copy_idx ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_DEL_SEL, sel_cnt ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_SEL_ALL, item_cnt ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_SEL_RVS, item_cnt ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_MAKE_FRAGS, idx>=0&&stream_fragable(pt_stream) ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_SEL2PCAP, sel_cnt ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_2_BIN, idx>=0 ? MF_ENABLED : MF_GRAYED);
                EnableMenuItem ((HMENU) wParam, IDM_STREAM_2_TEXT, idx>=0 ? MF_ENABLED : MF_GRAYED);

                return 0;

            }

            break;
        }

    }
    
    return DefWindowProc (hwnd, message, wParam, lParam) ;
}