int main(int argc, char *argv[]) { const char *op = NULL; int ret; av_log_set_level(AV_LOG_DEBUG); if (argc < 2) { usage(argv[0]); return 1; } /* register codecs and formats and other lavf/lavc components*/ av_register_all(); avformat_network_init(); op = argv[1]; if (strcmp(op, "list") == 0) { if (argc < 3) { av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n"); ret = AVERROR(EINVAL); } else { ret = list_op(argv[2]); } } else if (strcmp(op, "del") == 0) { if (argc < 3) { av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n"); ret = AVERROR(EINVAL); } else { ret = del_op(argv[2]); } } else if (strcmp(op, "move") == 0) { if (argc < 4) { av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n"); ret = AVERROR(EINVAL); } else { ret = move_op(argv[2], argv[3]); } } else { av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op); ret = AVERROR(EINVAL); } avformat_network_deinit(); return ret < 0 ? 1 : 0; }
void LIR_Assembler::emit_op1(LIR_Op1* op) { switch (op->code()) { case lir_move: if (op->move_kind() == lir_move_volatile) { assert(op->patch_code() == lir_patch_none, "can't patch volatiles"); volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info()); } else { move_op(op->in_opr(), op->result_opr(), op->type(), op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned, op->move_kind() == lir_move_wide); } break; case lir_roundfp: { LIR_OpRoundFP* round_op = op->as_OpRoundFP(); roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack()); break; } case lir_return: return_op(op->in_opr()); break; case lir_safepoint: if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) { _masm->nop(); } safepoint_poll(op->in_opr(), op->info()); break; case lir_fxch: fxch(op->in_opr()->as_jint()); break; case lir_fld: fld(op->in_opr()->as_jint()); break; case lir_ffree: ffree(op->in_opr()->as_jint()); break; case lir_branch: break; case lir_push: push(op->in_opr()); break; case lir_pop: pop(op->in_opr()); break; case lir_neg: negate(op->in_opr(), op->result_opr()); break; case lir_leal: leal(op->in_opr(), op->result_opr()); break; case lir_null_check: if (GenerateCompilerNullChecks) { ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info()); if (op->in_opr()->is_single_cpu()) { _masm->null_check(op->in_opr()->as_register(), stub->entry()); } else { Unimplemented(); } } break; case lir_monaddr: monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr()); break; #ifdef SPARC case lir_pack64: pack64(op->in_opr(), op->result_opr()); break; case lir_unpack64: unpack64(op->in_opr(), op->result_opr()); break; #endif case lir_unwind: unwind_op(op->in_opr()); break; default: Unimplemented(); break; } }
// Emit machine code for a single one-operand LIR instruction by
// dispatching on its opcode to the corresponding back-end helper.
// Opcodes without a case fall through to Unimplemented().
void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        // Volatile moves use a dedicated path; they cannot be patched.
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        // This variant of move_op also threads three temp operands through.
        move_op(op->in_opr(), op->result_opr(),
                op->tmp1_opr(), op->tmp2_opr(), op->tmp3_opr(),
                op->type(), op->patch_code(), op->info(),
                op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_branch:
      // Deliberately empty: no code is emitted for lir_branch here.
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;

    case lir_bit_test:
      bit_test(op->in_opr(), op->result_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;

    case lir_null_check:
      // Null checks are only emitted when the flag enables them.
      if (GenerateCompilerNullChecks) {
        null_check(op->in_opr(), op->info());
      }
      break;

    case lir_klassTable_oop_load:
      klassTable_oop_load(op->in_opr(), op->result_opr(), op->tmp1_opr());
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}