static bool is_success(char *recv_buf,Message *recv_Msg,char *szMsg) { str2struct(recv_buf,recv_Msg); // 解析银行返回的数据 print_recv_msg(recv_Msg); if(strcmp(recv_Msg->result,"00")!=0) { set_msg(szMsg,"银行返回交易失败,代码[%s] 错误信息[%s]\n",recv_Msg->result, recv_Msg->retmessage); return false; } set_msg(szMsg,"银行返回交易成功\n"); return true; }
/* Showdown step for one player: resolve the player's best card combination,
 * compare it against the current best hand in `pool`, and return the (possibly
 * new) winner.  Side effects: updates `pool` with the winning cards, marks the
 * player's cards opened or folds them, and posts a status message via set_msg.
 */
player_t* player_reveal(player_t *player, player_t *yet_winner, card_t **pool)
{
  counter_t i;
  card_t *p[] = {NULL, NULL, NULL, NULL, NULL, NULL, NULL};
  char str_combo[255];
  player_t* new_winner = NULL;

  player_fill_pool(player, p);                          /* gather this player's cards */
  card_combo_to_text(str_combo, pok_resolve(p, POOL_SIZE));  /* human-readable combo name */

  if (!yet_winner) {
    /* First player to reveal: they are the provisional winner by default. */
    LOG("yet_winner now is %s\n", player->name);
    for(i = 0; i < POOL_SIZE; i++) pool[i] = p[i];
    set_msg(M1, "%s reveals %s. Press any key.", player->name, str_combo);
    player->is_cards_opened = TRUE;
    new_winner = player;
    goto out;
  }

  if (pok_compare(pool, p) == pool) {
    /* we lose! fold */
    set_msg(M1, "%s folds. Press any key.", player->name);
    player_bank(player);                /* presumably settles the player's bet -- TODO confirm */
    player->is_in_game = FALSE;
    new_winner = yet_winner;
    goto out;
  }

  if (pok_compare(pool, p) == p) {
    /* we win (for now) */
    yet_winner->is_in_game = FALSE;
    /* clear previous ties */
    FOR_EACH_PLAYER(i) {
      if (game.players[i].is_tie) {
        game.players[i].is_in_game = FALSE;
        game.players[i].is_tie = FALSE;
      }
    }
    LOG("%s lefts the game\n", yet_winner->name);
    yet_winner = player;
    for(i = 0; i < POOL_SIZE; i++) pool[i] = p[i];   /* new best hand */
    set_msg(M1, "%s reveals %s. Press any key.", player->name, str_combo);
    player->is_cards_opened = TRUE;
    new_winner = player;
    goto out;
  }
  /* NOTE(review): the `out:` label, the tie-handling branch (pok_compare
   * returning neither pointer) and the final `return new_winner;` are not
   * visible in this chunk -- the function appears truncated here; confirm
   * against the full source. */
/* Allocate and initialise a new MSI message of the given _type
 * (TYPE_REQUEST or TYPE_RESPONSE), storing _typeid in the matching
 * header.  Any other _type frees the partial message and returns NULL.
 * All optional header pointers are cleared so a later free is safe.
 */
msi_msg_t* msi_msg_new ( uint8_t _type, const uint8_t* _typeid )
{
    msi_msg_t* _retu = calloc ( sizeof ( msi_msg_t ), 1 );
    assert(_retu);   /* allocation failure is treated as fatal */

    set_msg(_retu);  /* project helper; presumably registers/initialises the message -- TODO confirm */

    if ( _type == TYPE_REQUEST ){
        /* ALLOCATE_HEADER is a project macro; no trailing semicolon by design */
        ALLOCATE_HEADER( msi_header_request_t, _retu->_request, _typeid )
        _retu->_response = NULL;
    } else if ( _type == TYPE_RESPONSE ) {
        ALLOCATE_HEADER( msi_header_response_t, _retu->_response, _typeid )
        _retu->_request = NULL;
    } else {
        /* unknown message type: release the partially built message */
        msi_free_msg(_retu);
        return NULL;
    }

    /* every message carries a protocol-version header */
    ALLOCATE_HEADER( msi_header_version_t, _retu->_version, VERSION_STRING)

    _retu->_friend_id = NULL;
    _retu->_call_type = NULL;
    _retu->_user_agent = NULL;
    _retu->_info = NULL;
    _retu->_next = NULL;

    return _retu;
}
void LocalZipTask::update_state(const QString& msg) { set_msg(msg); const int progress = (0 == _total_files ? 100 : _compressed_files * 100 / _total_files); set_progress(progress > 100 ? 100 : progress); state_changed(); }
// Generated protobuf-style merge: copy into this message every field whose
// has-bit is set in `from`, then merge the unknown-field set.  The outer
// word tests short-circuit whole groups of eight has-bits at once.
void RceQueryRefuseReceiveWishItem::MergeFrom(const RceQueryRefuseReceiveWishItem& from) {
  GOOGLE_CHECK_NE(&from, this);  // merging a message into itself is a programming error
  // Fields 0-7 live in the first has-bits word.
  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
    if (from._has_bit(0)) {
      set_type(from.type());
    }
    if (from._has_bit(1)) {
      set_name(from.name());
    }
    if (from._has_bit(2)) {
      set_playerid(from.playerid());
    }
    if (from._has_bit(3)) {
      set_id(from.id());
    }
    if (from._has_bit(4)) {
      set_guid(from.guid());
    }
    if (from._has_bit(5)) {
      set_planetid(from.planetid());
    }
    if (from._has_bit(6)) {
      set_time(from.time());
    }
    if (from._has_bit(7)) {
      set_msg(from.msg());
    }
  }
  // Field 8 (url) is the only one in the second group.
  if (from._has_bits_[8 / 32] & (0xffu << (8 % 32))) {
    if (from._has_bit(8)) {
      set_url(from.url());
    }
  }
  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
}
// 查询绑定 int Bank_Querybind(TRUSERID *handle,ST_PACK *rPack,int *pRetCode,char *szMsg) { set_msg(szMsg,"Bank_Querybind 功能未实现, 代码[%c]\n",rPack->sstatus0[0] ); writelog(LOG_ERR,szMsg); //return E_TRANS_FUNC_NONSUPPORT; return Bank_Process("00000",handle,rPack,pRetCode,szMsg); }
/* Award the entire bank to `player`: announce it, let the game gather the
 * bank (game_collect_bank -- behavior defined elsewhere), then move the
 * whole bank total into the player's cash and empty the bank. */
void player_collect_bank(player_t* player)
{
    LOG("%s collects bank of %u\n", player->name, game.bank);
    set_msg(M2, "%s collects bank of %u\n", player->name, game.bank);

    game_collect_bank();
    const unsigned won = game.bank;   /* read after game_collect_bank, as the original did */
    player->cash += won;
    game.bank = 0;
}
/* Award `part` chips of the bank to `player`: announce it, let the game
 * gather the bank, then move `part` from the bank into the player's cash.
 * The bank keeps whatever remains. */
void player_collect_part(player_t* player, unsigned short part)
{
    LOG("%s collects part of %u\n", player->name, part);
    /* BUG FIX: the on-screen message previously printed game.bank instead of
     * the amount actually collected, disagreeing with the LOG line above
     * (compare player_collect_bank, where both report the same value). */
    set_msg(M2, "%s collects part of %u\n", player->name, part);
    game_collect_bank();
    player->cash += part;
    game.bank -= part;
}
// Generated protobuf-style merge: copy the single `msg` field when its
// has-bit is set in `from`, then merge the unknown-field set.
void MsgMessage::MergeFrom(const MsgMessage& from) {
  GOOGLE_CHECK_NE(&from, this);  // self-merge is a programming error
  // Only field 0 (msg) exists; the word test mirrors the generated pattern.
  if (from._has_bits_[0 / 32] & (0xffu << (0 % 32))) {
    if (from._has_bit(0)) {
      set_msg(from.msg());
    }
  }
  mutable_unknown_fields()->MergeFrom(from.unknown_fields());
}
/*
 * Report an internal error.  Formats the printf-style message `m` and
 * forwards it to set_msg() with a "please send a bug report" suffix.
 */
void bug(const char *m, ...)
{
    char *msg = NULL;
    va_list ap;

    va_start(ap, m);
    /* BUG FIX: vasprintf() returns -1 on allocation failure and leaves
     * msg indeterminate; the old code passed that garbage pointer to
     * set_msg() and FREE().  Fall back to the raw format string. */
    if (vasprintf(&msg, m, ap) < 0)
        msg = NULL;
    va_end(ap);

    if (msg != NULL) {
        set_msg("%s; please send a bug report", msg);
        FREE(msg);
    } else {
        set_msg("%s; please send a bug report", m);
    }
}
// Build a message dialog: create a text view slightly smaller than the
// dialog, centre it horizontally near the top, paint the background,
// install the caption text, and register every supplied button.
MsgNew::MsgNew(int w_, int h_, const std::string& msg_, Button_ctrs_t buttons_)
    : View(w_, h_),
      msg_text(new TextView(w_ - 10, h_ - 10)),
      buttons(buttons_)
{
    const int centered_x = get_w() / 2 - msg_text->get_w() / 2;
    attach_subview(msg_text, VGPoint(centered_x, 5));
    fill_with_color(color);
    set_msg(msg_);
    msg_text->set_text_size(20);
    for (size_t idx = 0; idx != buttons_.size(); ++idx)
        add_button(buttons_[idx]);
}
/**
 * gnutls_x509_privkey_export2:
 * @key: Holds the key
 * @format: the format of output params. One of PEM or DER.
 * @out: will contain a private key PEM or DER encoded
 *
 * Exports the private key as a PKCS1 structure for RSA keys, or as an
 * integer sequence for DSA keys (the same parameter layout openssl uses).
 *
 * The output buffer is allocated using gnutls_malloc().
 *
 * A PEM-encoded structure carries a "BEGIN RSA PRIVATE KEY" header.
 *
 * Returns: On success, %GNUTLS_E_SUCCESS (0) is returned, otherwise a
 *   negative error value.
 *
 * Since 3.1.3
 **/
int gnutls_x509_privkey_export2(gnutls_x509_privkey_t key,
				gnutls_x509_crt_fmt_t format,
				gnutls_datum_t * out)
{
	if (key == NULL) {
		gnutls_assert();
		return GNUTLS_E_INVALID_REQUEST;
	}

	/* set_msg() picks the PEM header text matching the key type. */
	return _gnutls_x509_export_int2(key->key, format, set_msg(key), out);
}
/**
 * gnutls_x509_privkey_export:
 * @key: Holds the key
 * @format: the format of output params. One of PEM or DER.
 * @output_data: will contain a private key PEM or DER encoded
 * @output_data_size: holds the size of output_data (and will be
 *   replaced by the actual size of parameters)
 *
 * Exports the private key as a PKCS1 structure for RSA keys, or as an
 * integer sequence for DSA keys (the same parameter layout openssl uses).
 *
 * If the buffer provided is not long enough to hold the output, then
 * *@output_data_size is updated and %GNUTLS_E_SHORT_MEMORY_BUFFER
 * will be returned.
 *
 * A PEM-encoded structure carries a "BEGIN RSA PRIVATE KEY" header.
 *
 * Returns: On success, %GNUTLS_E_SUCCESS (0) is returned, otherwise a
 *   negative error value.
 **/
int gnutls_x509_privkey_export(gnutls_x509_privkey_t key,
			       gnutls_x509_crt_fmt_t format,
			       void *output_data, size_t * output_data_size)
{
	if (key == NULL) {
		gnutls_assert();
		return GNUTLS_E_INVALID_REQUEST;
	}

	/* set_msg() picks the PEM header text matching the key type. */
	return _gnutls_x509_export_int(key->key, format, set_msg(key),
				       output_data, output_data_size);
}
/*
 * Read a len-byte value from the mapped file at offset off, honouring the
 * byte-order and signedness flags in t.  Stores the magnitude in *res and,
 * when s is non-NULL, a negative-flag in *s.
 * Returns 0 on success, -1 (with an error message set) when the requested
 * bytes lie outside the file.  Assumes 0 < len <= sizeof(unsigned long).
 */
int valbits_at(unsigned t, size_t len, size_t off, valbits_t *res, int *s)
{
  size_t m = ! (t & T_LSBFIRST);   /* nonzero => most-significant byte first */
  valbits_t u = 0;
  /* BUG FIX: the old "1 << (CHAR_BIT * len - 1)" shifted a signed int,
   * which is undefined behaviour once the shift reaches the sign bit
   * (e.g. len == 4 with 32-bit int).  Shift an unsigned long instead. */
  unsigned long sbit = 1UL << (CHAR_BIT * len - 1);
  valbits_t mask = sbit - 1;
  int neg = FALSE;

  /* Overflow-safe form of "off + len > data_size" (off + len could wrap). */
  if (len > data_size || off > data_size - len)
    {
      set_msg("value outside file");
      return -1;
    }
  if (! m)
    off += len - 1;                /* LSB-first: walk the bytes backwards */
  while (0 < len--)
    {
      u = (u << CHAR_BIT) + data[off];
      off = m ? off + 1 : off - 1;
    }
  if (u & sbit)
    {
      neg = TRUE;
      if (t & T_ONESCOMP)
        u = ~u & mask;
      else if (t & T_TWOSCOMP)
        u = (~u + 1) & mask;
      else if (t & T_SIGNMAG)
        u = u & mask;
      else
        neg = FALSE;               /* plain unsigned: the high bit is data */
    }
  *res = u;
  if (s)
    *s = neg;
  return 0;
}
static bool result(char *type,Message *recv_Msg,TRUSERID *handle,int *pRetCode,char *szMsg) { double amt; ST_CPACK aPack; ST_PACK *out_pack = &(aPack.pack); ResetNormalCPack(&aPack,0,1); SetCol(handle,0); if(strcmp(type,"20210")==0) //20210:公司方发起查询卡余额 { SetCol(handle,F_LVOL1,F_DAMT0,F_DAMT1,F_SALL_NAME, F_VSMESS,0); amt = atof(recv_Msg->pay_amount);// 取账面余额 out_pack->damt0=amt;// 取账面余额 out_pack->damt1=amt;// out_pack->lvol1=1; // 正常处理 des2src(out_pack->sall_name, recv_Msg->acctno); // 银行帐号 des2src(out_pack->vsmess, szMsg); PutRow(handle,out_pack,pRetCode,szMsg); } else if(strcmp(type,"20200")==0||strcmp(type,"20201")==0) //20200:公司发起联机转账扣款 20201:公司发起联机转账扣款反交易 { SetCol(handle,F_LVOL1,F_SPHONE, F_VSMESS,0); out_pack->lvol1=1; // 正常处理 des2src(out_pack->vsmess, szMsg); des2src(out_pack->sphone, recv_Msg->bankorder);// 银行端流水号 PutRow(handle,out_pack,pRetCode,szMsg); } else if(strcmp(type,"10000")==0||strcmp(type,"10001")==0) //10000:新增委托关系 10001:撤销委托关系 { SetCol(handle,F_SCARD0, F_VSMESS,F_VSVARSTR0, F_LSAFE_LEVEL, 0); out_pack->lsafe_level = 0; des2src(out_pack->scard0, recv_Msg->acctno); des2src(out_pack->vsmess, szMsg); des2src(out_pack->vsvarstr0, szMsg); PutRow(handle,out_pack,pRetCode,szMsg); } else { set_msg(szMsg,"功能未实现, 代码[%d]\n",type); writelog(LOG_ERR,szMsg); return false; } return true; }
msi_msg_t* msi_parse_msg ( const uint8_t* _data ) { assert(_data); msi_msg_t* _retu = calloc ( sizeof ( msi_msg_t ), 1 ); assert(_retu); set_msg(_retu); _retu->_headers = msi_parse_raw_data ( _data ); if ( msi_parse_headers (_retu) == FAILURE ) { msi_free_msg(_retu); return NULL; } if ( !_retu->_version || strcmp((const char*)_retu->_version->_header_value, VERSION_STRING) != 0 ){ msi_free_msg(_retu); return NULL; } return _retu; }
void LocalZipTask::run() { set_has_error(false); // 检查路径存在性 const QString source = get_source(), destination = get_destination(); if (!QFileInfo(source).exists()) { set_msg("Source path not exists: " + source); set_has_error(true); return; } if (!QFileInfo(QFileInfo(destination).path()).exists()) // XXX QFileInfo.path() 返回父目录 { set_msg("Parent directory of ZIP destination not exists: " + QFileInfo(destination).path()); set_has_error(true); return; } // 首先检查是否需要更新 set_msg("Checking..."); bool needUpdate = true; if (QFileInfo(destination).exists() && !QFileInfo(destination).isDir()) { const QDateTime mtime = QFileInfo(destination).lastModified(); needUpdate = newer_than(source, mtime); } if (!needUpdate) { set_msg("No need to update at " + QString::fromStdString(DateTime().get_clock_str()) + "."); return; } // 计算工作量 set_msg("Counting..."); _total_files = count_files(source); _compressed_files = 0; // 压缩文件/文件夹到zip bool rs = zip(); if (rs) set_msg("Last done at " + QString::fromStdString(DateTime().get_clock_str()) + "."); state_changed(); }
// negative filter: should callee NOT be inlined?
// Returns true (with a reason recorded via set_msg) when inlining must or
// should be refused; false when no objection stands.  Hard correctness
// restrictions run first, then explicit directives, then heuristics.
bool InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, JVMState* jvms, WarmCallInfo* wci_result) {
  const char* fail_msg = NULL;

  // First check all inlining restrictions which are required for correctness
  if ( callee_method->is_abstract()) {
    fail_msg = "abstract method"; // // note: we allow ik->is_abstract()
  } else if (!callee_method->holder()->is_initialized()) {
    fail_msg = "method holder not initialized";
  } else if ( callee_method->is_native()) {
    fail_msg = "native method";
  } else if ( callee_method->dont_inline()) {
    fail_msg = "don't inline by annotation";
  }

  // one more inlining restriction
  if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) {
    fail_msg = "unloaded signature classes";
  }

  if (fail_msg != NULL) {
    set_msg(fail_msg);
    return true;
  }

  // ignore heuristic controls on inlining
  if (C->directive()->should_inline(callee_method)) {
    set_msg("force inline by CompileCommand");
    return false;
  }

  if (C->directive()->should_not_inline(callee_method)) {
    set_msg("disallowed by CompileCommand");
    return true;
  }

  // Replay/cache-profile directives override the heuristics below.
  int caller_bci = jvms->bci();
  int inline_depth = inline_level()+1;
  if (CacheProfiles && ciCacheReplay::should_inline(CompilerThread::current()->get_cache_replay_state(), callee_method, caller_bci, inline_depth)) {
    set_msg("force inline by ciCacheReplay");
    return false;
  }

  if (CacheProfiles && ciCacheReplay::should_not_inline(CompilerThread::current()->get_cache_replay_state(), callee_method, caller_bci, inline_depth)) {
    set_msg("disallowed by ciCacheReplay");
    return true;
  }
  // Method-wide (call-site independent) cache-replay veto.
  if (CacheProfiles && ciCacheReplay::should_not_inline(CompilerThread::current()->get_cache_replay_state(), callee_method)) {
    set_msg("disallowed by ciCacheReplay");
    return true;
  }

#ifndef PRODUCT
  if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) {
    set_msg("force inline by ciReplay");
    return false;
  }
  if (ciReplay::should_not_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) {
    set_msg("disallowed by ciReplay");
    return true;
  }
  if (ciReplay::should_not_inline(callee_method)) {
    set_msg("disallowed by ciReplay");
    return true;
  }
#endif

  if (callee_method->force_inline()) {
    set_msg("force inline by annotation");
    return false;
  }

  // Now perform checks which are heuristic
  if (is_unboxing_method(callee_method, C)) {
    // Inline unboxing methods.
    return false;
  }

  if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode) {
    set_msg("already compiled into a big method");
    return true;
  }

  // don't inline exception code unless the top method belongs to an
  // exception class
  if (caller_tree() != NULL && callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
    // Walk up to the root of the inline tree to find the top-level method.
    const InlineTree *top = this;
    while (top->caller_tree() != NULL) top = top->caller_tree();
    ciInstanceKlass* k = top->method()->holder();
    if (!k->is_subclass_of(C->env()->Throwable_klass())) {
      set_msg("exception method");
      return true;
    }
  }

  // use frequency-based objections only for non-trivial methods
  if (callee_method->code_size() <= MaxTrivialSize) {
    return false;
  }

  // don't use counts with -Xcomp or CTW
  if (UseInterpreter && !CompileTheWorld) {

    if (!callee_method->has_compiled_code() && !callee_method->was_executed_more_than(0)) {
      set_msg("never executed");
      return true;
    }

    if (is_init_with_ea(callee_method, caller_method, C)) {
      // Escape Analysis: inline all executed constructors
      return false;
    } else {
      intx counter_high_value;
      // Tiered compilation uses a different "high value" than non-tiered compilation.
      // Determine the right value to use.
      if (TieredCompilation) {
        counter_high_value = InvocationCounter::count_limit / 2;
      } else {
        counter_high_value = CompileThreshold / 2;
      }
      if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, counter_high_value))) {
        set_msg("executed < MinInliningThreshold times");
        return true;
      }
    }
  }

  return false;
}
// positive filter: should callee be inlined?
// Returns true when some positive reason to inline exists (directive,
// annotation, replay data, many throws, or size/frequency heuristics);
// records the reason via set_msg.  _forced_inline marks non-heuristic wins.
bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) {
  // Allows targeted inlining
  if (C->directive()->should_inline(callee_method)) {
    *wci_result = *(WarmCallInfo::always_hot());
    if (C->print_inlining() && Verbose) {
      CompileTask::print_inline_indent(inline_level());
      tty->print_cr("Inlined method is hot: ");
    }
    set_msg("force inline by CompileCommand");
    _forced_inline = true;
    return true;
  }

  if (callee_method->force_inline()) {
    set_msg("force inline by annotation");
    _forced_inline = true;
    return true;
  }

  // Cache-replay data may force inlining at this specific call site/depth.
  int inline_depth = inline_level()+1;
  if (CacheProfiles && ciCacheReplay::should_inline(CompilerThread::current()->get_cache_replay_state(), callee_method, caller_bci, inline_depth)) {
    //tty->print_cr("force inline by ciCacheReplay");
    set_msg("force inline by ciCacheReplay");
    _forced_inline = true;
    return true;
  }

#ifndef PRODUCT
  if (ciReplay::should_inline(C->replay_inline_data(), callee_method, caller_bci, inline_depth)) {
    set_msg("force inline by ciReplay");
    _forced_inline = true;
    return true;
  }
#endif

  int size = callee_method->code_size_for_inlining();

  // Check for too many throws (and not too huge)
  if(callee_method->interpreter_throwout_count() > InlineThrowCount && size < InlineThrowMaxSize ) {
    wci_result->set_profit(wci_result->profit() * 100);
    if (C->print_inlining() && Verbose) {
      CompileTask::print_inline_indent(inline_level());
      tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
    }
    set_msg("many throws");
    return true;
  }

  int default_max_inline_size = C->max_inline_size();
  int inline_small_code_size = InlineSmallCode / 4;
  int max_inline_size = default_max_inline_size;

  // Scale the raw profile count to this compilation's view of the call site.
  int call_site_count = method()->scale_count(profile.count());
  int invoke_count = method()->interpreter_invocation_count();

  assert(invoke_count != 0, "require invocation count greater than zero");
  int freq = call_site_count / invoke_count;

  // bump the max size if the call is frequent
  if ((freq >= InlineFrequencyRatio) || (call_site_count >= InlineFrequencyCount) || is_unboxing_method(callee_method, C) || is_init_with_ea(callee_method, caller_method, C)) {

    max_inline_size = C->freq_inline_size();
    if (size <= max_inline_size && TraceFrequencyInlining) {
      CompileTask::print_inline_indent(inline_level());
      tty->print_cr("Inlined frequent method (freq=%d count=%d):", freq, call_site_count);
      CompileTask::print_inline_indent(inline_level());
      callee_method->print();
      tty->cr();
    }
  } else {
    // Not hot. Check for medium-sized pre-existing nmethod at cold sites.
    if (callee_method->has_compiled_code() && callee_method->instructions_size() > inline_small_code_size) {
      // we force inlining when the caller is cached (to make sure that we replay correctly)
      // if (ciCacheProfiles::is_cached(caller_method->get_Method())) {
      // set_msg("force inline by ciCacheProfiles (over compiled into medium method)");
      // _forced_inline = true;
      // return true;
      // }
      set_msg("already compiled into a medium method");
      return false;
    }
  }
  if (size > max_inline_size) {
    if (max_inline_size > default_max_inline_size) {
      set_msg("hot method too big");
    } else {
      set_msg("too big");
    }
    return false;
  }
  return true;
}
/*************************************************
* Config_Error Constructor                       *
*************************************************/
Config_Error::Config_Error(const std::string& err, u32bit line)
   {
   const std::string prefix = "Config error at line " + to_string(line);
   set_msg(prefix + ": " + err);
   }
//-----------------------------try_to_inline-----------------------------------
// return true if ok
// Relocated from "InliningClosure::try_to_inline"
// Applies the full inlining policy: global size budget, positive and
// negative filters, depth and recursion limits.  May set should_delay
// when incremental inlining wants to retry the site later.
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay) {

  // Global bytecode budget for this compilation.
  if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("size > DesiredMethodLimit");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  _forced_inline = false; // Reset
  if (!should_inline(callee_method, caller_method, caller_bci, profile, wci_result)) {
    return false;
  }
  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
    return false;
  }

  if (InlineAccessors && callee_method->is_accessor()) {
    // accessor methods are not subject to any of the following limits.
    set_msg("accessor");
    return true;
  }

  // suppress a few checks for accessors and trivial methods
  if (callee_method->code_size() > MaxTrivialSize) {

    // don't inline into giant methods
    if (C->over_inlining_cutoff()) {
      if ((!callee_method->force_inline() && !caller_method->is_compiled_lambda_form()) || !IncrementalInline) {
        set_msg("NodeCountInliningCutoff");
        return false;
      } else {
        should_delay = true;
      }
    }

    if ((!UseInterpreter || CompileTheWorld) && is_init_with_ea(callee_method, caller_method, C)) {
      // Escape Analysis stress testing when running Xcomp or CTW:
      // inline constructors even if they are not reached.
    } else if (forced_inline()) {
      // Inlining was forced by CompilerOracle, ciReplay or annotation
    } else if (profile.count() == 0) {
      // don't inline unreached call sites
      set_msg("call site not reached");
      return false;
    }
  }

  if (!C->do_inlining() && InlineAccessors) {
    set_msg("not an accessor");
    return false;
  }

  // Limit inlining depth in case inlining is forced or
  // _max_inline_level was increased to compensate for lambda forms.
  if (inline_level() > MaxForceInlineLevel) {
    set_msg("MaxForceInlineLevel");
    return false;
  }
  if (inline_level() > _max_inline_level) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("inlining too deep");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  // detect direct and indirect recursive inlining
  {
    // count the current method and the callee
    const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
    int inline_level = 0;
    if (!is_compiled_lambda_form) {
      if (method() == callee_method) {
        inline_level++;
      }
    }
    // count callers of current method and callee
    Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
    for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
      if (j->method() == callee_method) {
        if (is_compiled_lambda_form) {
          // Since compiled lambda forms are heavily reused we allow recursive inlining. If it is truly
          // a recursion (using the same "receiver") we limit inlining otherwise we can easily blow the
          // compiler stack.
          Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
          if (caller_argument0 == callee_argument0) {
            inline_level++;
          }
        } else {
          inline_level++;
        }
      }
    }
    if (inline_level > MaxRecursiveInlineLevel) {
      set_msg("recursive inlining is too deep");
      return false;
    }
  }

  // Re-check the global budget including this callee's own size.
  int size = callee_method->code_size_for_inlining();

  if (ClipInlining && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
    if (!callee_method->force_inline() || !IncrementalInline) {
      set_msg("size > DesiredMethodLimit");
      return false;
    } else if (!C->inlining_incrementally()) {
      should_delay = true;
    }
  }

  // ok, inline this method
  return true;
}
/*************************************************
* Algorithm_Not_Found Constructor                *
*************************************************/
Algorithm_Not_Found::Algorithm_Not_Found(const std::string& name)
   {
   std::string text = "Could not find any algorithm named \"";
   text += name;
   text += "\"";
   set_msg(text);
   }
/*************************************************
* Invalid_Algorithm_Name Constructor             *
*************************************************/
Invalid_Algorithm_Name::Invalid_Algorithm_Name(const std::string& name)
   {
   const std::string text = "Invalid algorithm name: " + name;
   set_msg(text);
   }
/*************************************************
* Invalid_Message_Number Constructor             *
*************************************************/
Invalid_Message_Number::Invalid_Message_Number(const std::string& where,
                                               u32bit message_no)
   {
   const std::string location = "Pipe::" + where;
   set_msg(location + ": Invalid message number " + to_string(message_no));
   }
/*************************************************
* Invalid_IV_Length Constructor                  *
*************************************************/
Invalid_IV_Length::Invalid_IV_Length(const std::string& mode, u32bit bad_len)
   {
   const std::string len_text = to_string(bad_len);
   set_msg("IV length " + len_text + " is invalid for " + mode);
   }
/*************************************************
* Invalid_Block_Size Constructor                 *
*************************************************/
Invalid_Block_Size::Invalid_Block_Size(const std::string& mode,
                                       const std::string& pad)
   {
   std::string text = "Padding method ";
   text += pad;
   text += " cannot be used with ";
   text += mode;
   set_msg(text);
   }
/*************************************************
* Invalid_Key_Length Constructor                 *
*************************************************/
Invalid_Key_Length::Invalid_Key_Length(const std::string& name, u32bit length)
   {
   const std::string len_text = to_string(length);
   set_msg(name + " cannot accept a key of length " + len_text);
   }
/*
* Pipe::Invalid_Message_Number Constructor
*/
Pipe::Invalid_Message_Number::Invalid_Message_Number(const std::string& where,
                                                     message_id msg)
   {
   const std::string which = to_string(msg);
   set_msg("Pipe::" + where + ": Invalid message number " + which);
   }
//------------------------------ok_to_inline-----------------------------------
// Top-level inlining decision for one call site.  Returns a WarmCallInfo
// describing how hot the call is (always_hot to inline now, NULL to refuse,
// or a heap copy for warm calls when InlineWarmCalls is on), and records
// the decision reason via set_msg for the inlining printout.
WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci, bool& should_delay) {
  assert(callee_method != NULL, "caller checks for optimized virtual!");
  assert(!should_delay, "should be initialized to false");
#ifdef ASSERT
  // Make sure the incoming jvms has the same information content as me.
  // This means that we can eventually make this whole class AllStatic.
  if (jvms->caller() == NULL) {
    assert(_caller_jvms == NULL, "redundant instance state");
  } else {
    assert(_caller_jvms->same_calls_as(jvms->caller()), "redundant instance state");
  }
  assert(_method == jvms->method(), "redundant instance state");
#endif
  int caller_bci = jvms->bci();
  ciMethod* caller_method = jvms->method();

  // Do some initial checks.
  if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
    set_msg("failed initial checks");
    print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
    return NULL;
  }

  // Do some parse checks.
  set_msg(check_can_parse(callee_method));
  if (msg() != NULL) {
    // check_can_parse returned a refusal reason.
    print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
    return NULL;
  }

  // Check if inlining policy says no.
  WarmCallInfo wci = *(initial_wci);
  bool success = try_to_inline(callee_method, caller_method, caller_bci, jvms, profile, &wci, should_delay);

#ifndef PRODUCT
  // Diagnostic: report when the warm-call temperature disagrees with the
  // boolean policy decision.
  if (InlineWarmCalls && (PrintOpto || C->print_inlining())) {
    bool cold = wci.is_cold();
    bool hot = !cold && wci.is_hot();
    bool old_cold = !success;
    if (old_cold != cold || (Verbose || WizardMode)) {
      if (msg() == NULL) {
        set_msg("OK");
      }
      tty->print(" OldInlining= %4s : %s\n WCI=", old_cold ? "cold" : "hot", msg());
      wci.print();
    }
  }
#endif
  // Collapse the policy decision to an extreme temperature.
  if (success) {
    wci = *(WarmCallInfo::always_hot());
  } else {
    wci = *(WarmCallInfo::always_cold());
  }

  if (!InlineWarmCalls) {
    if (!wci.is_cold() && !wci.is_hot()) {
      // Do not inline the warm calls.
      wci = *(WarmCallInfo::always_cold());
    }
  }

  if (!wci.is_cold()) {
    // Inline!
    if (msg() == NULL) {
      set_msg("inline (hot)");
    }
    print_inlining(callee_method, caller_bci, caller_method, true /* success */);
    build_inline_tree_for_callee(callee_method, jvms, caller_bci);
    if (InlineWarmCalls && !wci.is_hot()) {
      return new (C) WarmCallInfo(wci); // copy to heap
    }
    return WarmCallInfo::always_hot();
  }

  // Do not inline
  if (msg() == NULL) {
    set_msg("too cold to inline");
  }
  print_inlining(callee_method, caller_bci, caller_method, false /* !success */ );
  return NULL;
}
/* Allocate various kinds of blocks. */
/* malloc-backed allocator: prepends a gs_malloc_block_t bookkeeping header
 * to every block, enforces mmem->limit, and tracks used/max_used totals.
 * Returns a pointer just past the header, or 0 on refusal/failure.  Under
 * DEBUG a reason string is kept for the trace printout at the end. */
static byte *
gs_heap_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_malloc_memory_t *mmem = (gs_malloc_memory_t *) mem;
    byte *ptr = 0;

#ifdef DEBUG
    const char *msg;
    static const char *const ok_msg = "OK";

# define set_msg(str) (msg = (str))
#else
    /* Release builds compile the bookkeeping away entirely. */
# define set_msg(str) DO_NOTHING
#endif

    /* Exclusive acces so our decisions and changes are 'atomic' */
    if (mmem->monitor)
        gx_monitor_enter(mmem->monitor);
    if (size > mmem->limit - sizeof(gs_malloc_block_t)) {
        /* Definitely too large to allocate; also avoids overflow. */
        set_msg("exceeded limit");
    } else {
        uint added = size + sizeof(gs_malloc_block_t);

        if (mmem->limit - added < mmem->used)
            set_msg("exceeded limit");
        else if ((ptr = (byte *) malloc(added)) == 0)
            set_msg("failed");
        else {
            gs_malloc_block_t *bp = (gs_malloc_block_t *) ptr;

            /*
             * We would like to check that malloc aligns blocks at least as
             * strictly as the compiler (as defined by ARCH_ALIGN_MEMORY_MOD).
             * However, Microsoft VC 6 does not satisfy this requirement.
             * See gsmemory.h for more explanation.
             */
            set_msg(ok_msg);
            /* Link the new block at the head of the allocated list. */
            if (mmem->allocated)
                mmem->allocated->prev = bp;
            bp->next = mmem->allocated;
            bp->prev = 0;
            bp->size = size;
            bp->type = &st_bytes;
            bp->cname = cname;
            mmem->allocated = bp;
            ptr = (byte *) (bp + 1);   /* caller's data starts after the header */
            mmem->used += size + sizeof(gs_malloc_block_t);
            if (mmem->used > mmem->max_used)
                mmem->max_used = mmem->used;
        }
    }
    if (mmem->monitor)
        gx_monitor_leave(mmem->monitor); /* Done with exclusive access */
    /* We don't want to 'fill' under mutex to keep the window smaller */
    if (ptr)
        gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#ifdef DEBUG
    if (gs_debug_c('a') || msg != ok_msg)
        dlprintf4("[a+]gs_malloc(%s)(%u) = 0x%lx: %s\n", client_name_string(cname), size, (ulong) ptr, msg);
#endif
    return ptr;
#undef set_msg
}