// Load a Unit from the repo by name/MD5.  Returns nullptr when no repo
// contains a matching unit.  When per-unit memory tracking is enabled
// (jemalloc builds only), the net bytes retained by Unit creation are
// appended to /tmp/units-<pid>.map as "<name> <bytes>".
std::unique_ptr<Unit>
UnitRepoProxy::load(const std::string& name, const MD5& md5) {
  UnitEmitter ue(md5);
  if (!loadHelper(ue, name, md5)) return nullptr;

#ifdef USE_JEMALLOC
  if (RuntimeOption::TrackPerUnitMemory) {
    // The "thread.allocatedp"/"thread.deallocatedp" mallctls return a
    // pointer to the thread-local counter, hence the pointer-sized len.
    size_t len = sizeof(uint64_t*);
    uint64_t* alloc = nullptr;
    uint64_t* del = nullptr;
    int err =
      mallctl("thread.allocatedp", static_cast<void*>(&alloc), &len,
              nullptr, 0);
    if (!err) {
      err = mallctl("thread.deallocatedp", static_cast<void*>(&del), &len,
                    nullptr, 0);
    }
    if (err || !alloc || !del) {
      // mallctl failed; skip tracking instead of dereferencing
      // uninitialized/null counter pointers.
      return ue.create();
    }
    auto before = *alloc;
    auto debefore = *del;
    std::unique_ptr<Unit> result = ue.create();
    auto after = *alloc;
    auto deafter = *del;

    // Net bytes retained by this unit = (allocated - deallocated) delta.
    auto path = folly::sformat("/tmp/units-{}.map", getpid());
    auto change = (after - deafter) - (before - debefore);
    auto str = folly::sformat("{} {}\n", name, change);
    // Best-effort append; tracking output is diagnostic only.
    auto out = std::fopen(path.c_str(), "a");
    if (out) {
      std::fwrite(str.data(), str.size(), 1, out);
      std::fclose(out);
    }

    return result;
  }
#endif

  return ue.create();
}
Example #2
0
// Load a Unit by name/MD5 from whichever attached repo contains it.
// Scans repos from highest id down, then pulls the unit's litstrs,
// arrays, preconsts, preclasses, and funcs into the emitter.  Returns
// NULL if no repo has the unit or a repo error occurs while loading.
Unit* UnitRepoProxy::load(const std::string& name, const MD5& md5) {
  UnitEmitter ue(md5);
  ue.setFilepath(StringData::GetStaticString(name));
  // Look for a repo that contains a unit with matching MD5.
  int repoId;
  for (repoId = RepoIdCount - 1; repoId >= 0; --repoId) {
    // NOTE(review): get() evidently returns false on success (repo-query
    // convention here) -- the loop stops at the first repo that has the
    // unit; confirm against GetUnitStmt::get().
    if (!getUnit(repoId).get(ue, md5)) {
      break;
    }
  }
  if (repoId < 0) {
    // Exhausted all repos without a hit.
    TRACE(3, "No repo contains '%s' (0x%016llx%016llx)\n",
             name.c_str(), md5.q[0], md5.q[1]);
    return NULL;
  }
  // Populate the emitter from the repo that matched.  Any repo-layer
  // failure aborts the whole load.
  try {
    getUnitLitstrs(repoId).get(ue);
    getUnitArrays(repoId).get(ue);
    getUnitPreConsts(repoId).get(ue);
    m_repo.pcrp().getPreClasses(repoId).get(ue);
    m_repo.frp().getFuncs(repoId).get(ue);
  } catch (RepoExc& re) {
    TRACE(0, "Repo error loading '%s' (0x%016llx%016llx) from '%s': %s\n",
             name.c_str(), md5.q[0], md5.q[1], m_repo.repoName(repoId).c_str(),
             re.msg().c_str());
    return NULL;
  }
  TRACE(3, "Repo loaded '%s' (0x%016llx%016llx) from '%s'\n",
           name.c_str(), md5.q[0], md5.q[1], m_repo.repoName(repoId).c_str());
  return ue.create();
}
Example #3
0
// Exercises FakeCluster::read() against hand-built EBML byte streams:
// (1) a minimal body containing only a Timecode child, (2) a full body
// with SilentTracks and PrevSize, then three error paths (undersized
// body, unknown child id, missing mandatory Timecode).
TEST(BaseCluster, Read)
{
    std::stringstream input;
    tawara::UIntElement tc(tawara::ids::Timecode, 42);
    tawara::UIntElement st1(tawara::ids::SilentTrackNumber, 1);
    tawara::UIntElement st2(tawara::ids::SilentTrackNumber, 2);
    tawara::UIntElement ps(tawara::ids::PrevSize, 0x1234);

    FakeCluster e;
    // Case 1: body is just the timecode; silent tracks stay empty and
    // previous_size keeps its default of 0.
    std::streamsize body_size(tc.size());
    tawara::vint::write(body_size, input);
    tc.write(input);
    EXPECT_EQ(tawara::vint::size(body_size) + body_size,
            e.read(input));
    EXPECT_EQ(42, e.timecode());
    EXPECT_TRUE(e.silent_tracks().empty());
    EXPECT_EQ(0, e.previous_size());

    // Case 2: extend the body with a SilentTracks master element (two
    // track numbers) and a PrevSize element.
    body_size += tawara::ids::size(tawara::ids::SilentTracks) +
        tawara::vint::size(st1.size() + st2.size()) +
        st1.size() + st2.size() + ps.size();
    tawara::vint::write(body_size, input);
    tc.write(input);
    tawara::ids::write(tawara::ids::SilentTracks, input);
    tawara::vint::write(st1.size() + st2.size(), input);
    st1.write(input);
    st2.write(input);
    ps.write(input);
    EXPECT_EQ(tawara::vint::size(body_size) + body_size,
            e.read(input));
    EXPECT_EQ(42, e.timecode());
    EXPECT_FALSE(e.silent_tracks().empty());
    EXPECT_EQ(0x1234, e.previous_size());

    // Body size value wrong (too small)
    input.str(std::string());
    tawara::vint::write(2, input);
    tc.write(input);
    ps.write(input);
    EXPECT_THROW(e.read(input), tawara::BadBodySize);
    // Invalid child
    input.str(std::string());
    tawara::UIntElement ue(tawara::ids::EBML, 0xFFFF);
    tawara::vint::write(ue.size(), input);
    ue.write(input);
    EXPECT_THROW(e.read(input), tawara::InvalidChildID);
    // Missing timecode
    input.str(std::string());
    tawara::vint::write(ps.size(), input);
    ps.write(input);
    EXPECT_THROW(e.read(input), tawara::MissingChild);
}
Example #4
0
// Read a signed Exp-Golomb code from the bitstream.
//
// Maps the unsigned code k from ue() to the signed value
// (-1)^(k+1) * ceil(k/2).  The magnitude is computed as
// k/2 + (k odd ? 1 : 0) rather than (k+1)/2, so k+1 can never
// overflow when k == INT64_MAX.
int64_t se(struct bitstream *bstr)
{
    const int64_t code = ue(bstr);
    const int64_t magnitude = code / 2 + (code % 2 ? 1 : 0);

    // Odd codes map to positive values, even codes to negative.
    return (code % 2 ? 1 : -1) * magnitude;
}
Example #5
0
// determine what filter group the given username is in
// return -1 when user not found
//
// Filter-group entries have the form "username=filterN" (N is 1 or 2
// digits).  On success, fg receives the zero-based group index and
// DGAUTH_OK is returned; the username is also lowercased in place so it
// appears lowercase in the caller's logs.
int AuthPlugin::determineGroup(std::string &user, int &fg)
{
	// Reject empty/placeholder usernames outright.
	if (user.length() < 1 || user == "-") {
		return DGAUTH_NOMATCH;
	}
	String u(user);
	u.toLower();  // since the filtergroupslist is read in in lowercase, we should do this.
	user = u.toCharArray();  // also pass back to ConnectionHandler, so appears lowercase in logs
	// Search key is "username=" so we only match whole usernames.
	String ue(u);
	ue += "=";

	char *i = o.filter_groups_list.findStartsWithPartial(ue.toCharArray());

	if (i == NULL) {
#ifdef DGDEBUG
		std::cout << "User not in filter groups list: " << ue << std::endl;
#endif
		return DGAUTH_NOUSER;
	}
#ifdef DGDEBUG
	std::cout << "User found: " << i << std::endl;
#endif
	ue = i;
	// findStartsWithPartial may return a partial-prefix match, so verify
	// the part before '=' is exactly our username before parsing.
	if (ue.before("=") == u) {
		// Strip "username=filter" leaving just the group number digits.
		ue = ue.after("=filter");
		int l = ue.length();
		if (l < 1 || l > 2) {
			return DGAUTH_NOUSER;
		}
		fg = ue.toInteger();
		// Reject group numbers beyond the configured count.
		if (fg > o.numfg) {
			return DGAUTH_NOUSER;
		}
		// Convert from 1-based config numbering to 0-based index.
		if (fg > 0) {
			fg--;
		}
		return DGAUTH_OK;
	}
	return DGAUTH_NOUSER;
}
Example #6
0
 // Build a human-readable description of a Win32 SEH exception (C++
 // exception, assert, access violation, or generic SEH code) followed by
 // a stack trace.  `skip` drops the raise-machinery frames from the walk.
 std::string get_exception_information(EXCEPTION_POINTERS & eps) {
   std::stringstream narrow_stream;
   int skip = 0;
   EXCEPTION_RECORD * er = eps.ExceptionRecord;
   switch (er->ExceptionCode) {
     case MSC_EXCEPTION_CODE: { // C++ exception
       // Try to recover the thrown object; fall back to listing the
       // catchable types when it isn't a std::exception.
       UntypedException ue(eps);
       if (std::exception * e = exception_cast<std::exception>(ue)) {
         narrow_stream << typeid(*e).name() << "\n" << e->what();
       } else {
         narrow_stream << "Unknown C++ exception thrown.\n";
         get_exception_types(narrow_stream, ue);
       }
       skip = 2; // skips RaiseException() and _CxxThrowException()
     } break;
     case ASSERT_EXCEPTION_CODE: {
       // NOTE(review): assumes the raiser heap-allocated the message and
       // transferred ownership via ExceptionInformation[0] -- confirm
       // against the matching RaiseException() call before changing.
       char * assert_message = reinterpret_cast<char *>(er->ExceptionInformation[0]);
       narrow_stream << assert_message;
       free(assert_message);
       skip = 1; // skips RaiseException()
     } break;
     case EXCEPTION_ACCESS_VIOLATION: {
       // ExceptionInformation[0]: 0 = read fault, nonzero = write fault;
       // ExceptionInformation[1] is the inaccessible address.
       narrow_stream << "Access violation. Illegal "
                     << (er->ExceptionInformation[0] ? "write" : "read")
                     << " by "
                     << er->ExceptionAddress
                     << " at "
                     << reinterpret_cast<void *>(er->ExceptionInformation[1]);
     } break;
     default: {
       narrow_stream << "SEH exception thrown. Exception code: "
                     << std::hex << std::uppercase << er->ExceptionCode
                     << " at "
                     << er->ExceptionAddress;
     }
   }
   narrow_stream << "\n\nStack Trace:\n";
   generate_stack_walk(narrow_stream, *(eps.ContextRecord), skip);
   return narrow_stream.str();
 }
Example #7
0
/* Write the H.264 buffering_period payload into pbc via the u()/ue()
 * bitstream macros.  Emits the SPS id, then one initial CPB removal
 * delay/offset pair per CPB when NAL HRD parameters are present.  The
 * statement order IS the wire format -- do not reorder. */
static void vaapi_encode_h264_write_buffering_period(PutBitContext *pbc,
                                                     VAAPIEncodeContext *ctx,
                                                     VAAPIEncodePicture *pic)
{
    VAAPIEncodeH264Context            *priv = ctx->priv_data;
    VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params;
    VAEncPictureParameterBufferH264   *vpic = pic->codec_picture_params;
    int i;

    ue(vpic_var(seq_parameter_set_id));

    if (mseq->nal_hrd_parameters_present_flag) {
        /* Field width comes from the HRD length parameter written in the
         * VUI, not a fixed size. */
        for (i = 0; i <= mseq->cpb_cnt_minus1; i++) {
            u(mseq->initial_cpb_removal_delay_length_minus1 + 1,
              mseq_var(initial_cpb_removal_delay));
            u(mseq->initial_cpb_removal_delay_length_minus1 + 1,
              mseq_var(initial_cpb_removal_delay_offset));
        }
    }
    if (mseq->vcl_hrd_parameters_present_flag) {
        av_assert0(0 && "vcl hrd parameters not supported");
    }
}
Example #8
0
/* Write an H.264 picture parameter set NAL into pbc using the
 * u()/ue()/se() bitstream macros.  Statement order matches the PPS
 * syntax in the spec; unsupported optional features (slice groups,
 * scaling matrices) are guarded by asserts rather than emitted. */
static void vaapi_encode_h264_write_pps(PutBitContext *pbc,
                                        VAAPIEncodeContext *ctx)
{
    VAEncPictureParameterBufferH264   *vpic = ctx->codec_picture_params;
    VAAPIEncodeH264Context            *priv = ctx->priv_data;
    VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params;

    vaapi_encode_h264_write_nal_header(pbc, NAL_PPS, 3);

    ue(vpic_var(pic_parameter_set_id));
    ue(vpic_var(seq_parameter_set_id));

    u(1, vpic_field(entropy_coding_mode_flag));
    u(1, mseq_var(bottom_field_pic_order_in_frame_present_flag));

    ue(mseq_var(num_slice_groups_minus1));
    if (mseq->num_slice_groups_minus1 > 0) {
        ue(mseq_var(slice_group_map_type));
        av_assert0(0 && "slice groups not supported");
    }

    ue(vpic_var(num_ref_idx_l0_active_minus1));
    ue(vpic_var(num_ref_idx_l1_active_minus1));

    u(1, vpic_field(weighted_pred_flag));
    u(2, vpic_field(weighted_bipred_idc));

    /* The VA struct stores the QP itself; the bitstream carries it
     * offset by 26, hence the explicit subtraction here. */
    se(vpic->pic_init_qp - 26, pic_init_qp_minus26);
    se(mseq_var(pic_init_qs_minus26));
    se(vpic_var(chroma_qp_index_offset));

    u(1, vpic_field(deblocking_filter_control_present_flag));
    u(1, vpic_field(constrained_intra_pred_flag));
    u(1, vpic_field(redundant_pic_cnt_present_flag));
    u(1, vpic_field(transform_8x8_mode_flag));

    u(1, vpic_field(pic_scaling_matrix_present_flag));
    if (vpic->pic_fields.bits.pic_scaling_matrix_present_flag) {
        av_assert0(0 && "scaling matrices not supported");
    }

    se(vpic_var(second_chroma_qp_index_offset));

    vaapi_encode_h264_write_trailing_rbsp(pbc);
}
Example #9
0
// Thread entry point: repeatedly run one full UE session
// (connect -> attach -> traffic -> detach) against the MME until the
// configured test duration has elapsed.  `arg` points at the UE number.
void* generate_traffic(void *arg) {
	const int ue_num = *static_cast<int*>(arg);
	bool time_exceeded = false;

	do {
		// Fresh client socket and UE state for every session iteration.
		Client to_mme;
		UE ue(ue_num);

		to_mme.bind_client();
		to_mme.fill_server_details(g_mme_port, g_mme_addr);
		to_mme.connect_with_server(ue_num);
		attach(ue, to_mme);
		send_traffic(ue);
		detach(ue, to_mme);
		// Sets time_exceeded once g_req_duration has passed g_start_time.
		time_check(g_start_time, g_req_duration, time_exceeded);
	} while (!time_exceeded);

	return NULL;
}
Example #10
0
/* Write an H.264 slice header into pbc via the u()/ue()/se() bitstream
 * macros, pulling values from the VA-API sequence/picture/slice params
 * plus the driver-private misc structs.  Statement order follows the
 * slice_header() syntax; unsupported branches assert instead of
 * emitting bits. */
static void vaapi_encode_h264_write_slice_header2(PutBitContext *pbc,
                                                  VAAPIEncodeContext *ctx,
                                                  VAAPIEncodePicture *pic,
                                                  VAAPIEncodeSlice *slice)
{
    VAEncSequenceParameterBufferH264  *vseq = ctx->codec_sequence_params;
    VAEncPictureParameterBufferH264   *vpic = pic->codec_picture_params;
    VAEncSliceParameterBufferH264   *vslice = slice->codec_slice_params;
    VAAPIEncodeH264Context            *priv = ctx->priv_data;
    VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params;
    VAAPIEncodeH264Slice            *pslice = slice->priv_data;
    VAAPIEncodeH264MiscSliceParams  *mslice = &pslice->misc_slice_params;

    vaapi_encode_h264_write_nal_header(pbc, mslice->nal_unit_type,
                                       mslice->nal_ref_idc);

    ue(vslice->macroblock_address, first_mb_in_slice);
    ue(vslice_var(slice_type));
    ue(vpic_var(pic_parameter_set_id));

    if (mseq->separate_colour_plane_flag) {
        u(2, mslice_var(colour_plane_id));
    }

    /* frame_num is written modulo MaxFrameNum, i.e. masked to
     * log2_max_frame_num_minus4 + 4 bits. */
    u(4 + vseq->seq_fields.bits.log2_max_frame_num_minus4,
      (vpic->frame_num &
       ((1 << (4 + vseq->seq_fields.bits.log2_max_frame_num_minus4)) - 1)),
      frame_num);

    if (!vseq->seq_fields.bits.frame_mbs_only_flag) {
        u(1, mslice_var(field_pic_flag));
        if (mslice->field_pic_flag)
            u(1, mslice_var(bottom_field_flag));
    }

    if (vpic->pic_fields.bits.idr_pic_flag) {
        ue(vslice_var(idr_pic_id));
    }

    /* Picture order count fields, shape depends on pic_order_cnt_type. */
    if (vseq->seq_fields.bits.pic_order_cnt_type == 0) {
        u(4 + vseq->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4,
          vslice_var(pic_order_cnt_lsb));
        if (mseq->bottom_field_pic_order_in_frame_present_flag &&
            !mslice->field_pic_flag) {
            se(vslice_var(delta_pic_order_cnt_bottom));
        }
    }

    if (vseq->seq_fields.bits.pic_order_cnt_type == 1 &&
        !vseq->seq_fields.bits.delta_pic_order_always_zero_flag) {
        se(vslice_var(delta_pic_order_cnt[0]));
        if (mseq->bottom_field_pic_order_in_frame_present_flag &&
            !mslice->field_pic_flag) {
            se(vslice_var(delta_pic_order_cnt[1]));
        }
    }

    if (vpic->pic_fields.bits.redundant_pic_cnt_present_flag) {
        ue(mslice_var(redundant_pic_cnt));
    }

    if (vslice->slice_type == SLICE_TYPE_B) {
        u(1, vslice_var(direct_spatial_mv_pred_flag));
    }

    if (vslice->slice_type == SLICE_TYPE_P ||
        vslice->slice_type == SLICE_TYPE_SP ||
        vslice->slice_type == SLICE_TYPE_B) {
        u(1, vslice_var(num_ref_idx_active_override_flag));
        if (vslice->num_ref_idx_active_override_flag) {
            ue(vslice_var(num_ref_idx_l0_active_minus1));
            if (vslice->slice_type == SLICE_TYPE_B)
                ue(vslice_var(num_ref_idx_l1_active_minus1));
        }
    }

    /* Reference picture list modification: only the "no modification"
     * flags are emitted; actual modification is unsupported. */
    if (mslice->nal_unit_type == 20 || mslice->nal_unit_type == 21) {
        av_assert0(0 && "no MVC support");
    } else {
        if (vslice->slice_type % 5 != 2 && vslice->slice_type % 5 != 4) {
            u(1, mslice_var(ref_pic_list_modification_flag_l0));
            if (mslice->ref_pic_list_modification_flag_l0) {
                av_assert0(0 && "ref pic list modification");
            }
        }
        if (vslice->slice_type % 5 == 1) {
            u(1, mslice_var(ref_pic_list_modification_flag_l1));
            if (mslice->ref_pic_list_modification_flag_l1) {
                av_assert0(0 && "ref pic list modification");
            }
        }
    }

    if ((vpic->pic_fields.bits.weighted_pred_flag &&
         (vslice->slice_type == SLICE_TYPE_P ||
          vslice->slice_type == SLICE_TYPE_SP)) ||
        (vpic->pic_fields.bits.weighted_bipred_idc == 1 &&
         vslice->slice_type == SLICE_TYPE_B)) {
        av_assert0(0 && "prediction weights not supported");
    }

    /* Precedence note: '>' binds tighter than '==', so this checks
     * (nal_ref_idc > 0) == reference_pic_flag -- i.e. the slice is a
     * reference iff the picture is marked as one. */
    av_assert0(mslice->nal_ref_idc > 0 ==
               vpic->pic_fields.bits.reference_pic_flag);
    if (mslice->nal_ref_idc != 0) {
        /* dec_ref_pic_marking(): IDR pictures carry the two IDR flags,
         * others the adaptive-marking flag (MMCOs unsupported). */
        if (vpic->pic_fields.bits.idr_pic_flag) {
            u(1, mslice_var(no_output_of_prior_pics_flag));
            u(1, mslice_var(long_term_reference_flag));
        } else {
            u(1, mslice_var(adaptive_ref_pic_marking_mode_flag));
            if (mslice->adaptive_ref_pic_marking_mode_flag) {
                av_assert0(0 && "MMCOs not supported");
            }
        }
    }

    if (vpic->pic_fields.bits.entropy_coding_mode_flag &&
        vslice->slice_type != SLICE_TYPE_I &&
        vslice->slice_type != SLICE_TYPE_SI) {
        ue(vslice_var(cabac_init_idc));
    }

    se(vslice_var(slice_qp_delta));
    if (vslice->slice_type == SLICE_TYPE_SP ||
        vslice->slice_type == SLICE_TYPE_SI) {
        if (vslice->slice_type == SLICE_TYPE_SP)
            u(1, mslice_var(sp_for_switch_flag));
        se(mslice_var(slice_qs_delta));
    }

    if (vpic->pic_fields.bits.deblocking_filter_control_present_flag) {
        ue(vslice_var(disable_deblocking_filter_idc));
        if (vslice->disable_deblocking_filter_idc != 1) {
            se(vslice_var(slice_alpha_c0_offset_div2));
            se(vslice_var(slice_beta_offset_div2));
        }
    }

    if (mseq->num_slice_groups_minus1 > 0 &&
        mseq->slice_group_map_type >= 3 && mseq->slice_group_map_type <= 5) {
        av_assert0(0 && "slice groups not supported");
    }

    // No alignment - this need not be a byte boundary.
}
Example #11
0
/* Write an H.264 sequence parameter set NAL into pbc via the
 * u()/ue()/se() bitstream macros.  Statement order follows the SPS
 * syntax; unsupported features (scaling matrices) assert. */
static void vaapi_encode_h264_write_sps(PutBitContext *pbc,
                                        VAAPIEncodeContext *ctx)
{
    VAEncSequenceParameterBufferH264  *vseq = ctx->codec_sequence_params;
    VAAPIEncodeH264Context            *priv = ctx->priv_data;
    VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params;
    int i;

    vaapi_encode_h264_write_nal_header(pbc, NAL_SPS, 3);

    u(8, mseq_var(profile_idc));
    u(1, mseq_var(constraint_set0_flag));
    u(1, mseq_var(constraint_set1_flag));
    u(1, mseq_var(constraint_set2_flag));
    u(1, mseq_var(constraint_set3_flag));
    u(1, mseq_var(constraint_set4_flag));
    u(1, mseq_var(constraint_set5_flag));
    u(2, 0, reserved_zero_2bits);

    u(8, vseq_var(level_idc));

    ue(vseq_var(seq_parameter_set_id));

    /* These profiles carry the extended chroma/bit-depth fields; the
     * list mirrors the profile_idc values enumerated in the spec's SPS
     * syntax condition. */
    if (mseq->profile_idc == 100 || mseq->profile_idc == 110 ||
        mseq->profile_idc == 122 || mseq->profile_idc == 244 ||
        mseq->profile_idc ==  44 || mseq->profile_idc ==  83 ||
        mseq->profile_idc ==  86 || mseq->profile_idc == 118 ||
        mseq->profile_idc == 128 || mseq->profile_idc == 138) {
        ue(vseq_field(chroma_format_idc));

        /* 4:4:4 only: planes may be coded separately. */
        if (vseq->seq_fields.bits.chroma_format_idc == 3)
            u(1, mseq_var(separate_colour_plane_flag));

        ue(vseq_var(bit_depth_luma_minus8));
        ue(vseq_var(bit_depth_chroma_minus8));

        u(1, mseq_var(qpprime_y_zero_transform_bypass_flag));

        u(1, vseq_field(seq_scaling_matrix_present_flag));
        if (vseq->seq_fields.bits.seq_scaling_matrix_present_flag) {
            av_assert0(0 && "scaling matrices not supported");
        }
    }

    ue(vseq_field(log2_max_frame_num_minus4));
    ue(vseq_field(pic_order_cnt_type));

    /* POC parameters; layout depends on pic_order_cnt_type (type 2 has
     * no extra fields). */
    if (vseq->seq_fields.bits.pic_order_cnt_type == 0) {
        ue(vseq_field(log2_max_pic_order_cnt_lsb_minus4));
    } else if (vseq->seq_fields.bits.pic_order_cnt_type == 1) {
        u(1, mseq_var(delta_pic_order_always_zero_flag));
        se(vseq_var(offset_for_non_ref_pic));
        se(vseq_var(offset_for_top_to_bottom_field));
        ue(vseq_var(num_ref_frames_in_pic_order_cnt_cycle));

        for (i = 0; i < vseq->num_ref_frames_in_pic_order_cnt_cycle; i++)
            se(vseq_var(offset_for_ref_frame[i]));
    }

    ue(vseq_var(max_num_ref_frames));
    u(1, mseq_var(gaps_in_frame_num_allowed_flag));

    /* The VA struct stores sizes in whole macroblocks; the bitstream
     * fields are minus-one coded. */
    ue(vseq->picture_width_in_mbs  - 1, pic_width_in_mbs_minus1);
    ue(vseq->picture_height_in_mbs - 1, pic_height_in_mbs_minus1);

    u(1, vseq_field(frame_mbs_only_flag));
    if (!vseq->seq_fields.bits.frame_mbs_only_flag)
        u(1, vseq_field(mb_adaptive_frame_field_flag));

    u(1, vseq_field(direct_8x8_inference_flag));

    u(1, vseq_var(frame_cropping_flag));
    if (vseq->frame_cropping_flag) {
        ue(vseq_var(frame_crop_left_offset));
        ue(vseq_var(frame_crop_right_offset));
        ue(vseq_var(frame_crop_top_offset));
        ue(vseq_var(frame_crop_bottom_offset));
    }

    u(1, vseq_var(vui_parameters_present_flag));
    if (vseq->vui_parameters_present_flag)
        vaapi_encode_h264_write_vui(pbc, ctx);

    vaapi_encode_h264_write_trailing_rbsp(pbc);
}
Example #12
0
/* Write the H.264 VUI parameters (part of the SPS) into pbc via the
 * u()/ue() bitstream macros.  Each optional section is gated on its
 * present-flag; unsupported sections (VCL HRD, bitstream restrictions)
 * assert. */
static void vaapi_encode_h264_write_vui(PutBitContext *pbc,
                                        VAAPIEncodeContext *ctx)
{
    VAEncSequenceParameterBufferH264  *vseq = ctx->codec_sequence_params;
    VAAPIEncodeH264Context            *priv = ctx->priv_data;
    VAAPIEncodeH264MiscSequenceParams *mseq = &priv->misc_sequence_params;
    int i;

    u(1, vvui_field(aspect_ratio_info_present_flag));
    if (vseq->vui_fields.bits.aspect_ratio_info_present_flag) {
        u(8, vseq_var(aspect_ratio_idc));
        /* 255 == Extended_SAR: explicit width/height follow. */
        if (vseq->aspect_ratio_idc == 255) {
            u(16, vseq_var(sar_width));
            u(16, vseq_var(sar_height));
        }
    }

    u(1, mseq_var(overscan_info_present_flag));
    if (mseq->overscan_info_present_flag)
        u(1, mseq_var(overscan_appropriate_flag));

    /* Video signal description: format, range, and colour metadata. */
    u(1, mseq_var(video_signal_type_present_flag));
    if (mseq->video_signal_type_present_flag) {
        u(3, mseq_var(video_format));
        u(1, mseq_var(video_full_range_flag));
        u(1, mseq_var(colour_description_present_flag));
        if (mseq->colour_description_present_flag) {
            u(8, mseq_var(colour_primaries));
            u(8, mseq_var(transfer_characteristics));
            u(8, mseq_var(matrix_coefficients));
        }
    }

    u(1, mseq_var(chroma_loc_info_present_flag));
    if (mseq->chroma_loc_info_present_flag) {
        ue(mseq_var(chroma_sample_loc_type_top_field));
        ue(mseq_var(chroma_sample_loc_type_bottom_field));
    }

    u(1, vvui_field(timing_info_present_flag));
    if (vseq->vui_fields.bits.timing_info_present_flag) {
        u(32, vseq_var(num_units_in_tick));
        u(32, vseq_var(time_scale));
        u(1, mseq_var(fixed_frame_rate_flag));
    }

    /* NAL HRD parameters: one rate/size/CBR triple per CPB. */
    u(1, mseq_var(nal_hrd_parameters_present_flag));
    if (mseq->nal_hrd_parameters_present_flag) {
        ue(mseq_var(cpb_cnt_minus1));
        u(4, mseq_var(bit_rate_scale));
        u(4, mseq_var(cpb_size_scale));
        for (i = 0; i <= mseq->cpb_cnt_minus1; i++) {
            ue(mseq_var(bit_rate_value_minus1[i]));
            ue(mseq_var(cpb_size_value_minus1[i]));
            u(1, mseq_var(cbr_flag[i]));
        }
        u(5, mseq_var(initial_cpb_removal_delay_length_minus1));
        u(5, mseq_var(cpb_removal_delay_length_minus1));
        u(5, mseq_var(dpb_output_delay_length_minus1));
        u(5, mseq_var(time_offset_length));
    }
    u(1, mseq_var(vcl_hrd_parameters_present_flag));
    if (mseq->vcl_hrd_parameters_present_flag) {
        av_assert0(0 && "vcl hrd parameters not supported");
    }

    /* low_delay_hrd_flag only exists when some HRD section is present. */
    if (mseq->nal_hrd_parameters_present_flag ||
        mseq->vcl_hrd_parameters_present_flag)
        u(1, mseq_var(low_delay_hrd_flag));
    u(1, mseq_var(pic_struct_present_flag));

    u(1, vvui_field(bitstream_restriction_flag));
    if (vseq->vui_fields.bits.bitstream_restriction_flag) {
        av_assert0(0 && "bitstream restrictions not supported");
    }
}
Example #13
0
// Look the unit up by name/MD5 and materialize it; nullptr when it is
// not present in any repo.
std::unique_ptr<Unit>
UnitRepoProxy::load(const std::string& name, const MD5& md5) {
  UnitEmitter ue(md5);
  if (loadHelper(ue, name, md5)) {
    return ue.create();
  }
  return nullptr;
}
Example #14
0
// Decode a signed Exp-Golomb value: unsigned code k maps to
// (-1)^(k+1) * ceil(k/2), so odd codes are positive, even negative.
int64_t se(void *addr, int *bit_offset) {
  const int code = ue(addr, bit_offset);
  const int64_t magnitude = code / 2 + code % 2;
  return (code % 2 == 1) ? magnitude : -magnitude;
}
Example #15
0
/*
 * Minimal VDPAU H.264 decode/playback driver: opens an X11 window,
 * creates a decoder + 16 video surfaces + mixer + presentation queue,
 * mmaps the file named by argv[1] (length-prefixed NAL units), parses
 * just enough of each slice header to fill VdpPictureInfoH264, and
 * renders/presents one frame per second.
 *
 * NOTE(review): the frame size (1280x544) and H.264 SPS-derived fields
 * (log2_max_frame_num_minus4 etc.) are hard-coded for one specific
 * input stream rather than parsed from its SPS/PPS.
 */
int main(int argc, char **argv) {
  int width = 1280, height = 544;
  Display *display = XOpenDisplay(NULL);

  Window root = XDefaultRootWindow(display);
  Window window = XCreateSimpleWindow(
      display, root, 0, 0, 1280, 544, 0, 0, 0);
  XSelectInput(display, window, ExposureMask | KeyPressMask);
  XMapWindow(display, window);
  XSync(display, 0);

  VdpDevice dev;

  mark("vdp_device_create_x11\n");
  int ret = vdp_device_create_x11(display, 0, &dev, &vdp_get_proc_address);
  assert(ret == VDP_STATUS_OK);

// Resolve each VDPAU entry point through vdp_get_proc_address.
#define get(id, func) \
  ret = vdp_get_proc_address(dev, id, (void **)&func); \
  assert(ret == VDP_STATUS_OK);

  get(VDP_FUNC_ID_DECODER_CREATE, vdp_decoder_create);
  get(VDP_FUNC_ID_DECODER_DESTROY, vdp_decoder_destroy);
  get(VDP_FUNC_ID_DECODER_RENDER, vdp_decoder_render);

  get(VDP_FUNC_ID_VIDEO_MIXER_CREATE, vdp_video_mixer_create);
  get(VDP_FUNC_ID_VIDEO_MIXER_DESTROY, vdp_video_mixer_destroy);
  get(VDP_FUNC_ID_VIDEO_MIXER_RENDER, vdp_video_mixer_render);

  get(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, vdp_video_surface_create);
  get(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, vdp_video_surface_destroy);
  get(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, vdp_video_surface_get_bits_ycbcr);

  get(VDP_FUNC_ID_OUTPUT_SURFACE_CREATE, vdp_output_surface_create);
  get(VDP_FUNC_ID_OUTPUT_SURFACE_DESTROY, vdp_output_surface_destroy);
  get(VDP_FUNC_ID_OUTPUT_SURFACE_GET_BITS_NATIVE, vdp_output_surface_get_bits_native);

  get(VDP_FUNC_ID_PRESENTATION_QUEUE_CREATE, vdp_presentation_queue_create);
  get(VDP_FUNC_ID_PRESENTATION_QUEUE_DESTROY, vdp_presentation_queue_destroy);
  get(VDP_FUNC_ID_PRESENTATION_QUEUE_DISPLAY, vdp_presentation_queue_display);
  get(VDP_FUNC_ID_PRESENTATION_QUEUE_TARGET_CREATE_X11, vdp_presentation_queue_target_create_x11);
  get(VDP_FUNC_ID_PRESENTATION_QUEUE_BLOCK_UNTIL_SURFACE_IDLE, vdp_presentation_queue_block_until_surface_idle);
  get(VDP_FUNC_ID_PRESENTATION_QUEUE_GET_TIME, vdp_presentation_queue_get_time);

#undef get

  VdpDecoder dec;
  VdpVideoSurface video[16];
  VdpOutputSurface output;
  VdpPresentationQueue queue;
  VdpPresentationQueueTarget target;
  VdpVideoMixer mixer;

  VdpVideoMixerFeature mixer_features[] = {
  };
  VdpVideoMixerParameter mixer_params[] = {
    VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH,
    VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT,
    VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE
  };
  int zero = 0;
  const void *mixer_param_vals[] = {
    &width,
    &height,
    &zero
  };

  mark("vdp_decoder_create\n");
  // max_references = 6 matches the num_ref_frames used in `info` below.
  ret = vdp_decoder_create(dev, VDP_DECODER_PROFILE_H264_MAIN, 1280, 544, 6, &dec);
  assert(ret == VDP_STATUS_OK);

  int i;
  for (i = 0; i < 16; i++) {
    mark("vdp_video_surface_create: %d\n", i);
    ret = vdp_video_surface_create(dev, VDP_CHROMA_TYPE_420, 1280, 544, &video[i]);
    assert(ret == VDP_STATUS_OK);
    mark(" <-- %d\n", video[i]);
  }


  mark("vdp_output_surface_create\n");
  ret = vdp_output_surface_create(dev, VDP_RGBA_FORMAT_B8G8R8A8, 1280, 544, &output);
  assert(ret == VDP_STATUS_OK);

  mark("vdp_presentation_queue_target_create_x11\n");
  ret = vdp_presentation_queue_target_create_x11(dev, window, &target);
  assert(ret == VDP_STATUS_OK);

  mark("vdp_presentation_queue_create\n");
  ret = vdp_presentation_queue_create(dev, target, &queue);
  assert(ret == VDP_STATUS_OK);

  mark("vdp_video_mixer_create\n");
  ret = vdp_video_mixer_create(dev, sizeof(mixer_features)/sizeof(mixer_features[0]), mixer_features, sizeof(mixer_params)/sizeof(mixer_params[0]), mixer_params, mixer_param_vals, &mixer);
  assert(ret == VDP_STATUS_OK);


  // Map the whole input file; it is consumed as 4-byte big-endian
  // length-prefixed NAL units (see the read loop below).
  assert(argc > 1);
  int fd = open(argv[1], O_RDONLY);
  struct stat statbuf;
  assert(fstat(fd, &statbuf) == 0);
  void *addr = mmap(NULL, statbuf.st_size, PROT_READ, MAP_SHARED, fd, 0);
  void *orig_addr = addr;

  mark("mmap file addr: 0x%p size: 0x%lx\n", addr, statbuf.st_size);

  //printf("mmap'd file of size: %ld\n", statbuf.st_size);

  // Hard-coded picture parameters for the target stream; several fields
  // (frame_num, field_order_cnt, is_reference) are updated per slice.
  VdpPictureInfoH264 info = {
    .slice_count = 1,
    .field_order_cnt = { 65536, 65536 },
    .is_reference = 1,
    .frame_num = -1,
    .field_pic_flag = 0,
    .bottom_field_flag = 0,
    .num_ref_frames = 6,
    .mb_adaptive_frame_field_flag = 0,
    .constrained_intra_pred_flag = 0,
    .weighted_pred_flag = 0,
    .weighted_bipred_idc = 0,
    .frame_mbs_only_flag = 1,
    .transform_8x8_mode_flag = 0,
    .chroma_qp_index_offset = 0,
    .second_chroma_qp_index_offset = 0,
    .pic_init_qp_minus26 = 0,
    .num_ref_idx_l0_active_minus1 = 0,
    .num_ref_idx_l1_active_minus1 = 0,
    .log2_max_frame_num_minus4 = 5,
    .pic_order_cnt_type = 0,
    .log2_max_pic_order_cnt_lsb_minus4 = 6,
    .delta_pic_order_always_zero_flag = 0,
    .direct_8x8_inference_flag = 1,
    .entropy_coding_mode_flag = 1,
    .pic_order_present_flag = 0,
    .deblocking_filter_control_present_flag = 1,
    .redundant_pic_cnt_present_flag = 0,
  };
  // Flat (all-16) scaling lists, i.e. no scaling.
  int j;
  for (j = 0; j < 6; ++j) {
    int k;

    for (k = 0; k < 16; ++k)
      info.scaling_lists_4x4[j][k] = 16;
  }

  for (j = 0; j < 2; ++j) {
    int k;

    for (k = 0; k < 64; ++k)
      info.scaling_lists_8x8[j][k] = 16;
  }

  for (j = 0; j < 16; ++j)
    info.referenceFrames[j].surface = VDP_INVALID_HANDLE;


  mark("vdp_presentation_queue_get_time\n");
  VdpTime t;
  ret = vdp_presentation_queue_get_time(queue, &t);
  assert(ret == VDP_STATUS_OK);

  fprintf(stderr, "Start time: %ld\n", t);

  int vframe = 0;

  // NOTE(review): arithmetic on void* is a GCC extension (treated as
  // char*-sized steps); fine for gcc/clang, not portable C.
  while ((addr - orig_addr) < statbuf.st_size) {
    int size = ntohl(*(int *)addr);
    addr += 4;
    int nal_type = (*(char *)addr) & 0x1F;
    int nal_ref_idc = (*(char *)addr) >> 5;
    // Only decode coded slices: type 1 (non-IDR) and 5 (IDR).
    if (nal_type != 1 && nal_type != 5) {
      //fprintf(stderr, "Skipping NAL type %d, size: %d\n", nal_type, size);
      addr += size;
      continue;
    }
    //fprintf(stderr, "Processing NAL type %d, ref_idc: %d, size: %d\n", nal_type, nal_ref_idc, size);

    // Parse the slice header fields in wire order (per the H.264 slice
    // header layout): first_mb_in_slice, slice_type, pps id, frame_num,
    // then idr_pic_id for IDR slices.
    int bit_offset = 8;
    ue(addr, &bit_offset);
    int slice_type = ue(addr, &bit_offset);
    mark("nal_type: %d, ref_idc: %d, size: %d, slice_type: %d\n", nal_type, nal_ref_idc, size, slice_type);
    //fprintf(stderr, "Slice type: %d\n", slice_type);
    ue(addr, &bit_offset);
    info.frame_num = read_bits(addr, &bit_offset, info.log2_max_frame_num_minus4 + 4);
    if (nal_type == 5) {
      // IDR: consume idr_pic_id, reset frame numbering and the DPB.
      ue(addr, &bit_offset);
      info.frame_num = 0;
      for (j = 0; j < 16; ++j)
        info.referenceFrames[j].surface = VDP_INVALID_HANDLE;
    }

    uint32_t poc_lsb = read_bits(addr, &bit_offset, info.log2_max_pic_order_cnt_lsb_minus4 + 4);
    info.field_order_cnt[0] = (1 << 16) + poc_lsb;
    info.field_order_cnt[1] = (1 << 16) + poc_lsb;

    info.is_reference = nal_ref_idc != 0;

    // Feed the decoder a synthetic 00 00 01 start code plus the raw NAL.
    VdpBitstreamBuffer buffer[2];
    static const char header[3] = {0, 0, 1};
    buffer[0].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
    buffer[0].bitstream = header;
    buffer[0].bitstream_bytes = sizeof(header);
    buffer[1].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
    buffer[1].bitstream = addr;
    buffer[1].bitstream_bytes = size;
    mark("vdp_decoder_render: %d\n", video[vframe]);
    ret = vdp_decoder_render(dec, video[vframe], (void*)&info, 2, buffer);
    assert(ret == VDP_STATUS_OK);

    mark("vdp_video_mixer_render\n");
    ret = vdp_video_mixer_render(
        mixer,
        VDP_INVALID_HANDLE, NULL,
        VDP_VIDEO_MIXER_PICTURE_STRUCTURE_FRAME,
        0, NULL,
        video[vframe],
        0, NULL,
        NULL,
        output,
        NULL,
        NULL,
        0, NULL);
    assert(ret == VDP_STATUS_OK);

    // Present one frame per second (timestamps are in nanoseconds).
    t += 1000000000ULL;
    mark("vdp_presentation_queue_display\n");
    ret = vdp_presentation_queue_display(queue, output, 1280, 544, t);
    assert(ret == VDP_STATUS_OK);

    addr += size;

    /*
    uint32_t pitches[2] = {1280, 640 * 2};
    uint8_t *data[2];
    for (i = 0; i < 2; i++) {
      data[i] = malloc(1280 * 544 / (i ? 2 : 1));
      assert(data[i]);
    }
    ret = vdp_video_surface_get_bits_ycbcr(video[vframe], VDP_YCBCR_FORMAT_NV12, (void **)data, pitches);
    assert(ret == VDP_STATUS_OK);

    write(1, data[0], 1280 * 544);
    for (i = 0; i < 1280 * 544 / 2; i+=2)
      write(1, data[1] + i, 1);
    for (i = 0; i < 1280 * 544 / 2; i+=2)
      write(1, data[1] + i + 1, 1);
    */

    // Reference frames: shift the (sliding-window) list down and insert
    // the just-decoded surface at slot 0.
    if (info.is_reference) {
      for (j = 5; j > 0; --j)
        memcpy(&info.referenceFrames[j], &info.referenceFrames[j-1], sizeof(info.referenceFrames[0]));
      info.referenceFrames[0].surface = video[vframe];
      memcpy(info.referenceFrames[0].field_order_cnt, info.field_order_cnt, 2 * sizeof(uint32_t));
      info.referenceFrames[0].frame_idx = info.frame_num;
      info.referenceFrames[0].top_is_reference = 1;
      info.referenceFrames[0].bottom_is_reference = 1;
    }
    vframe = (vframe + 1) % 16;
    //if (vframe > 10) break;
  }

  return 0;
}