/*!
******************************************************************************
 @Function              palloc_fnCompDisconnect
******************************************************************************/
static IMG_RESULT palloc_fnCompDisconnect(
    IMG_HANDLE  hAttachHandle,
    IMG_VOID *  pvCompAttachmentData
)
{
    PALLOC_sAttachContext * psCtx = (PALLOC_sAttachContext *)pvCompAttachmentData;

    LOG_EVENT(PALLOC, PALLOC_COMPDISCONNECT, (LOG_FLAG_START), 0, 0);

    /* Tear down the resource bucket and everything registered in it. */
    RMAN_DestroyBucket(psCtx->hResBHandle);

    /* Close the device if one was opened for this attachment. */
    if (psCtx->hSysDevHandle != IMG_NULL)
    {
        SYSDEVKM_CloseDevice(psCtx->hSysDevHandle);
    }

    /* Release the attachment context itself. */
    IMG_FREE(psCtx);

    LOG_EVENT(PALLOC, PALLOC_COMPDISCONNECT, (LOG_FLAG_END), 0, 0);

    return IMG_SUCCESS;
}
bool DocStream::CloseFile() { if (true == bgMode){ if(-1 == pclose(bg_pipe_fd)){ LOG_ERROR("Fail to close pipe."); return false; } if(-1 == fclose(bg_file_fd)){ LOG_ERROR("Fail to close file."); return false; } bg_pipe_fd = NULL; bg_file_fd = NULL; } else{ fclose(fd); } fd = NULL; // For debug only if (!bgMode){ char cmd[100]; sprintf(cmd, "rm -f %s", tmpFile); LOG_EVENT(cmd); // system(cmd); sprintf(cmd, "rm -f *.png *.jpg *.emf *.wmf *.pg"); LOG_EVENT(cmd); // system(cmd); } return true; }
/*!
******************************************************************************
 @Function              PALLOC_Free1
******************************************************************************/
IMG_RESULT PALLOC_Free1(
    IMG_UINT32 ui32AllocId
)
{
    IMG_UINT32        ui32Result;
    PALLOC_sKmAlloc * psAlloc;
    IMG_HANDLE        hResource;
    IMG_HANDLE        hDevice;

    LOG_EVENT(PALLOC, PALLOC_FREEID, (LOG_FLAG_START | LOG_FLAG_QUAL_ARG1), ui32AllocId, 0);

    /* Look up the allocation behind this id. */
    ui32Result = RMAN_GetResource(ui32AllocId, PALLOC_RES_TYPE_1, (IMG_VOID **)&psAlloc, &hResource);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        return ui32Result;
    }
    hDevice = psAlloc->hDevHandle;

    /* Free the resource while holding the device lock. */
    DMANKM_LockDeviceContext(hDevice);
    RMAN_FreeResource(hResource);
    DMANKM_UnlockDeviceContext(hDevice);

    LOG_EVENT(PALLOC, PALLOC_FREEID, (LOG_FLAG_END| LOG_FLAG_QUAL_ARG1), ui32AllocId, 0);

    return IMG_SUCCESS;
}
/*!
******************************************************************************
 @Function              PALLOC_Initialise1
******************************************************************************/
IMG_RESULT PALLOC_Initialise1(IMG_VOID)
{
    /* Nothing to initialise at present -- only emit the trace markers. */
    LOG_EVENT(PALLOC, PALLOC_INITIALISE, (LOG_FLAG_START), 0, 0);
    LOG_EVENT(PALLOC, PALLOC_INITIALISE, (LOG_FLAG_END), 0, 0);

    return IMG_SUCCESS;
}
bool DocStream::OpenFile(const char* filename, bool background){ bgMode = background; char filen[std::strlen(filename) + 200]; std::memset(filen, '\0', std::strlen(filename) + 200); std::memcpy(filen, filename, std::strlen(filename)); AdjustCmd(filen, std::strlen(filename)); int strlength = std::strlen(filename) + 1; strlength += std::strlen(tmpFile); strlength += 256; char cmd[strlength]; std::memset(cmd, '\0', strlength); if (false == bgMode){ sprintf(cmd, "./wvWare -x ./wvHtml.xml -d %s -b wvImage %s > %s", work_dir, filen, tmpFile); LOG_EVENT(cmd); system(cmd); return OpenFileDirect(tmpFile); } else{ sprintf(cmd, "./wvWare -x ./wvHtml.xml -d %s -b wvImage %s | tee %s", work_dir, filen, tmpFile); LOG_EVENT(cmd); int32 retry = TIMES_RETRY; // Open pipe for processing // Sync for pipe fd ready while (!(bg_pipe_fd = popen(cmd, "r")) && retry >= 0){ usleep(TIME_WAIT); retry--; } if (!bg_pipe_fd){ LOG_ERROR("Fail to open pipe."); return false; } retry = TIMES_RETRY; // Sync for file fd ready if (!(bg_file_fd = fopen(tmpFile, "r")) && retry >= 0){ usleep(TIME_WAIT); retry--; } if (!bg_file_fd){ LOG_ERROR("Fail to open tmp file."); return false; } fd = bg_pipe_fd; // Sync for the first word int32 data; operator>>(data); operator<<(data); fileEnds = false; return true; } }
void req_checkpointjob( struct batch_request *preq) { job *pjob; int rc; attribute *pattr; if ((pjob = chk_job_request(preq->rq_ind.rq_manager.rq_objname, preq)) == NULL) { return; } if (is_cloud_job(pjob)) { rc = PBSE_CLOUD_REQUEST; req_reject(rc, 0, preq, NULL, "cloud jobs cannot be checkpointed"); } pattr = &pjob->ji_wattr[(int)JOB_ATR_checkpoint]; if ((pjob->ji_qs.ji_state == JOB_STATE_RUNNING) && ((pattr->at_flags & ATR_VFLAG_SET) && ((csv_find_string(pattr->at_val.at_str, "s") != NULL) || (csv_find_string(pattr->at_val.at_str, "c") != NULL) || (csv_find_string(pattr->at_val.at_str, "enabled") != NULL)))) { /* have MOM attempt checkpointing */ if ((rc = relay_to_mom(pjob->ji_qs.ji_un.ji_exect.ji_momaddr, preq, process_checkpoint_reply)) != 0) { req_reject(rc, 0, preq, NULL, NULL); } else { pjob->ji_qs.ji_svrflags |= JOB_SVFLG_CHECKPOINT_FILE; job_save(pjob, SAVEJOB_QUICK); LOG_EVENT(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pjob->ji_qs.ji_jobid, log_buffer); } } else { /* Job does not have checkpointing enabled, so reject the request */ LOG_EVENT( PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pjob->ji_qs.ji_jobid, log_buffer); req_reject(PBSE_IVALREQ, 0, preq, NULL, "job is not checkpointable"); } } /* END req_checkpointjob() */
/*!
******************************************************************************
 @Function              palloc_fnFree

 Resource-manager destructor for a PALLOC allocation: releases the pages
 (and, for user/native buffers, the CPU physical-address array mapping),
 then frees the bookkeeping structures.  Called by RMAN when the resource
 is freed or its bucket destroyed.
******************************************************************************/
static IMG_VOID palloc_fnFree(
    IMG_VOID * pvParam
)
{
    PALLOC_sKmAlloc *psKmAlloc = (PALLOC_sKmAlloc *) pvParam;
    IMG_UINT numPages;

    LOG_EVENT(PALLOC, PALLOC_FREE, LOG_FLAG_START | LOG_FLAG_QUAL_ARG1, (IMG_UINT32) (IMG_UINTPTR) pvParam, 0);

    /* Round the allocation size up to whole MMU pages. */
    numPages = (psKmAlloc->sAllocInfo.ui32Size + SYS_MMU_PAGE_SIZE - 1)/SYS_MMU_PAGE_SIZE;

    /* Release the backing memory according to how the buffer was created. */
    switch (psKmAlloc->eBufType)
    {
    case PALLOC_BUFTYPE_PALLOCATED:
        /* If this is not a mapping only */
        if (!psKmAlloc->sAllocInfo.bMappingOnly)
        {
            /* Free pages */
            SYSMEMU_FreePages(psKmAlloc->hPagesHandle);
        }
        break;

    case PALLOC_BUFTYPE_USERALLOC:
        /* User-allocated buffer: drop the CPU physical-address array
           mapping before freeing the page bookkeeping. */
        SYSOSKM_ReleaseCpuPAddrArray(((SYSMEMU_sPages *) psKmAlloc->hPagesHandle)->pvCpuKmAddr,
                                     psKmAlloc->hBufHandle,
                                     psKmAlloc->sAllocInfo.psSysPAddr,
                                     numPages);
        SYSMEMU_FreePages(psKmAlloc->hPagesHandle);
        break;

    case PALLOC_BUFTYPE_ANDROIDNATIVE:
#if defined ANDROID_ION_BUFFERS
        /* ION-backed gralloc buffer. */
        palloc_ReleaseIONBuf(psKmAlloc->hBufHandle, NULL);
        SYSMEMU_FreePages(psKmAlloc->hPagesHandle);
#elif defined ANDROID
        // Default gralloc: ashmem
        SYSOSKM_ReleaseCpuPAddrArray(((SYSMEMU_sPages *) psKmAlloc->hPagesHandle)->pvCpuKmAddr,
                                     psKmAlloc->hBufHandle,
                                     psKmAlloc->sAllocInfo.psSysPAddr,
                                     numPages);
        SYSMEMU_FreePages(psKmAlloc->hPagesHandle);
#else
        /* Android-native buffers are impossible off Android builds. */
        IMG_ASSERT(!"palloc_fnFree wrong buffer type");
#endif
        break;

    default:
        IMG_ASSERT(!"palloc_fnFree wrong buffer type");
    }

    /* Free the physical-address array sized at allocation time. */
    IMG_BIGORSMALL_FREE(numPages * sizeof(IMG_SYS_PHYADDR), psKmAlloc->sAllocInfo.psSysPAddr);

    /* Free this structure */
    IMG_FREE(psKmAlloc);

    LOG_EVENT(PALLOC, PALLOC_FREE, LOG_FLAG_END | LOG_FLAG_QUAL_ARG1, (IMG_UINT32) (IMG_UINTPTR) pvParam, 0);
}
/* Emit a log record when a work unit (ULT or tasklet) is removed from a
 * pool by a consumer execution stream.  No-op unless logging is enabled.
 * The message includes the unit's last/current xstream rank only when one
 * has been recorded. */
void ABTI_log_pool_remove(ABTI_pool *p_pool, ABT_unit unit,
                          ABTI_xstream *p_consumer)
{
    if (gp_ABTI_global->use_logging == ABT_FALSE) return;

    ABTI_thread *p_thread = NULL;
    ABTI_task *p_task = NULL;
    switch (p_pool->u_get_type(unit)) {
        case ABT_UNIT_TYPE_THREAD:
            p_thread = ABTI_thread_get_ptr(p_pool->u_get_thread(unit));
            /* Include the last xstream only if the ULT has run somewhere. */
            if (p_thread->p_last_xstream) {
                LOG_EVENT("[U%" PRIu64 ":E%" PRIu64 "] removed from "
                          "P%" PRIu64 " (consumer: E%" PRIu64 ")\n",
                          ABTI_thread_get_id(p_thread),
                          p_thread->p_last_xstream->rank, p_pool->id,
                          p_consumer->rank);
            } else {
                LOG_EVENT("[U%" PRIu64 "] removed from P%" PRIu64 " "
                          "(consumer: E%" PRIu64 ")\n",
                          ABTI_thread_get_id(p_thread), p_pool->id,
                          p_consumer->rank);
            }
            break;

        case ABT_UNIT_TYPE_TASK:
            p_task = ABTI_task_get_ptr(p_pool->u_get_task(unit));
            /* Same pattern for tasklets. */
            if (p_task->p_xstream) {
                LOG_EVENT("[T%" PRIu64 ":E%" PRIu64 "] removed from "
                          "P%" PRIu64 " (consumer: E%" PRIu64 ")\n",
                          ABTI_task_get_id(p_task), p_task->p_xstream->rank,
                          p_pool->id, p_consumer->rank);
            } else {
                LOG_EVENT("[T%" PRIu64 "] removed from P%" PRIu64 " "
                          "(consumer: E%" PRIu64 ")\n",
                          ABTI_task_get_id(p_task), p_pool->id,
                          p_consumer->rank);
            }
            break;

        default:
            /* Unknown unit type: programming error. */
            ABTI_ASSERT(0);
            break;
    }
}
/*!
******************************************************************************
 @Function              palloc_fnCompAttach
******************************************************************************/
static IMG_RESULT palloc_fnCompAttach(
    IMG_HANDLE            hConnHandle,
    DMANKM_sCompAttach *  psCompAttach
)
{
    LOG_EVENT(PALLOC, PALLOC_COMPATTACH, (LOG_FLAG_START), 0, 0);

    /* Register this component's connect/disconnect callbacks with DMAN. */
    psCompAttach->pfnCompConnect    = palloc_fnCompConnect;
    psCompAttach->pfnCompDisconnect = palloc_fnCompDisconnect;

    LOG_EVENT(PALLOC, PALLOC_COMPATTACH, (LOG_FLAG_END), 0, 0);

    return IMG_SUCCESS;
}
/*
 * reroute_job - attempt to route a job, aborting it on terminal
 * routing failures.  Returns the job_route() result code.
 */
int reroute_job(

  job *pjob)

  {
  char log_buf[LOCAL_LOG_BUF_SIZE];
  int  rc;

  if (LOGLEVEL >= 8)
    {
    sprintf(log_buf, "%s", pjob->ji_qs.ji_jobid);
    LOG_EVENT(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, log_buf);
    }

  rc = job_route(pjob);

  /* abort the job when routing failed for a terminal reason */
  switch (rc)
    {
    case PBSE_ROUTEREJ:
      job_abt(&pjob, pbse_to_txt(PBSE_ROUTEREJ));
      break;

    case PBSE_ROUTEEXPD:
      job_abt(&pjob, msg_routexceed);
      break;

    case PBSE_QUENOEN:
      job_abt(&pjob, msg_err_noqueue);
      break;

    default:
      break;
    }

  return(rc);
  }  /* END reroute_job() */
//这个函数全是内存的操作,因此可以返回为NULL,不考虑它的出错,因此只要能够执行到该函数 //一定能够更新规则表 void GetRuleTask::apply_all_new_rule(std::list<cstore::Sys_Order>& order_list) { cstore::Sys_Order sys_order; int order_list_size = order_list.size(); uint32_t order_cmd = 0; uint32_t hash_version = 0; std::string order_content; if(0 == order_list_size) { LOG_EVENT("GET new rule , BUT empty order item !"); return ; } std::list<cstore::Sys_Order>::iterator it = order_list.begin(); std::list<cstore::Sys_Order>::iterator it_end = order_list.end(); for( ; it != it_end ; ++ it) { order_cmd = it->order_header(); hash_version = it->hash_version(); order_content = it->order_content(); deal_with_new_order_rule(order_cmd , order_content , hash_version); } }
static void post_delete_mom2( struct work_task *pwt) { job *pjob; char *sigk = "SIGKILL"; pjob = (job *)pwt->wt_parm1; if (pjob->ji_qs.ji_state == JOB_STATE_RUNNING) { issue_signal(pjob, sigk, release_req, 0); sprintf(log_buffer, msg_delrunjobsig, sigk); LOG_EVENT( PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pjob->ji_qs.ji_jobid, log_buffer); } return; } /* END post_delete_mom2() */
// Query the order table for the current mod/extension count.
// Returns the count (>= 0) on success, 0 when the table is empty,
// and -1 on a query or result error.
int RuleInit::get_mod_from_db(Connection_T conn , RULE_TYPE type)
{
    // Compose: <prefix><order-table> where command = "EXTENSION_COMMAND"<mid>
    std::string get_mod_sql = ext_info_prefix
        + DBP->get_order_table_name(type)
        + " where command = \"" EXTENSION_COMMAND + "\""
        + ext_info_mid;

    ResultSet_T rets = DBP->execute_query(conn , get_mod_sql);
    if(!rets)
    {
        LOG_ERROR("RuleInit::get current mod from database error : " + get_mod_sql);
        return -1;
    }

    if(!ResultSet_next(rets))
    {
        LOG_EVENT("RuleInit::Extent infomation empty !");
        return 0;
    }

    int rows = DBP->get_int_result(rets , 1);
    if(rows < 0)
    {
        LOG_ERROR("RuleInit::get Extent infomation result error !");
        return -1;
    }
    return rows;
}
/*
 * post_modify_req - completion handler for a Modify Job request that was
 * relayed to MOM.  Restores the client connection, then either rejects the
 * request (MOM reported an error) or acks it.  A PBSE_UNKJOBID from MOM is
 * tolerated if the server still knows the job (it may have finished
 * migrating); in that case the request is still acked.
 */
static void post_modify_req(

  struct work_task *pwt)

  {
  struct batch_request *preq;
  job                  *pjob;

  svr_disconnect(pwt->wt_event);  /* close connection to MOM */

  preq = pwt->wt_parm1;
  preq->rq_conn = preq->rq_orgconn;  /* restore socket to client */

  /* any MOM error other than "unknown job" is fatal for the request */
  if ((preq->rq_reply.brp_code) && (preq->rq_reply.brp_code != PBSE_UNKJOBID))
    {
    sprintf(log_buffer, msg_mombadmodify, preq->rq_reply.brp_code);

    log_event(
      PBSEVENT_JOB,
      PBS_EVENTCLASS_JOB,
      preq->rq_ind.rq_modify.rq_objname,
      log_buffer);

    req_reject(preq->rq_reply.brp_code, 0, preq, NULL, NULL);
    }
  else
    {
    if (preq->rq_reply.brp_code == PBSE_UNKJOBID)
      {
      /* MOM does not know the job -- only reject if the server has also
       * lost track of it */
      if ((pjob = find_job(preq->rq_ind.rq_modify.rq_objname)) == NULL)
        {
        req_reject(preq->rq_reply.brp_code, 0, preq, NULL, NULL);

        return;
        }
      else
        {
        /* NOTE(review): `LOGLEVEL >= 0` is always true, so this diagnostic
         * is unconditional -- presumably a higher threshold was intended;
         * confirm before changing. */
        if (LOGLEVEL >= 0)
          {
          sprintf(log_buffer, "post_modify_req: PBSE_UNKJOBID for job %s in state %s-%s, dest = %s",
            (pjob->ji_qs.ji_jobid != NULL) ? pjob->ji_qs.ji_jobid : "",
            PJobState[pjob->ji_qs.ji_state],
            PJobSubState[pjob->ji_qs.ji_substate],
            pjob->ji_qs.ji_destin);

          LOG_EVENT(
            PBSEVENT_JOB,
            PBS_EVENTCLASS_JOB,
            pjob->ji_qs.ji_jobid,
            log_buffer);
          }
        }
      }

    reply_ack(preq);
    }

  return;
  }  /* END post_modify_req() */
static int dis_reply_write( int sfds, /* I */ struct batch_reply *preply) /* I */ { int rc; /* setup for DIS over tcp */ DIS_tcp_setup(sfds); /* send message to remote client */ if ((rc = encode_DIS_reply(sfds, preply)) || (rc = DIS_tcp_wflush(sfds))) { sprintf(log_buffer, "DIS reply failure, %d", rc); LOG_EVENT( PBSEVENT_SYSTEM, PBS_EVENTCLASS_REQUEST, "dis_reply_write", log_buffer); close_conn(sfds); } return(rc); } /* END dis_reply_write() */
void chkpt_xfr_hold( struct work_task *ptask) { job *pjob; struct work_task *ptasknew; struct batch_request *preq; preq = (struct batch_request *)ptask->wt_parm1; pjob = (job *)preq->rq_extra; if (LOGLEVEL >= 7) { sprintf(log_buffer, "BLCR copy completed (state is %s-%s)", PJobState[pjob->ji_qs.ji_state], PJobSubState[pjob->ji_qs.ji_substate]); LOG_EVENT( PBSEVENT_JOB, PBS_EVENTCLASS_JOB, pjob->ji_qs.ji_jobid, log_buffer); } release_req(ptask); ptasknew = set_task(WORK_Immed, 0, mom_cleanup_checkpoint_hold, (void*)pjob); return; } /* END chkpt_xfr_hold() */
/*
 * reroute_job - route a job sitting in a push-type routing queue,
 * aborting it on terminal routing failures.  Queues of any other type
 * are skipped.  Returns the routing result (PBSE_NONE if skipped).
 */
int reroute_job(

  job       *pjob,
  pbs_queue *pque)

  {
  char log_buf[LOCAL_LOG_BUF_SIZE];
  int  rc = PBSE_NONE;

  if (LOGLEVEL >= 7)
    {
    sprintf(log_buf, "%s", pjob->ji_qs.ji_jobid);
    LOG_EVENT(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, log_buf);
    }

  /* only push-type routing queues are processed */
  if ((pque != NULL) && (pque->qu_qs.qu_type == QTYPE_RoutePush))
    {
    rc = job_route(pjob);

    switch (rc)
      {
      case PBSE_ROUTEREJ:
        job_abt(&pjob, pbse_to_txt(PBSE_ROUTEREJ));
        break;

      case PBSE_ROUTEEXPD:
        job_abt(&pjob, msg_routexceed);
        break;

      case PBSE_QUENOEN:
        job_abt(&pjob, msg_err_noqueue);
        break;

      default:
        break;
      }
    }

  return(rc);
  }  /* END reroute_job() */
static void process_checkpoint_reply( struct work_task *pwt) { job *pjob; struct batch_request *preq; svr_disconnect(pwt->wt_event); /* close connection to MOM */ preq = pwt->wt_parm1; preq->rq_conn = preq->rq_orgconn; /* restore client socket */ if ((pjob = find_job(preq->rq_ind.rq_manager.rq_objname)) == (job *)0) { LOG_EVENT(PBSEVENT_DEBUG, PBS_EVENTCLASS_JOB, preq->rq_ind.rq_manager.rq_objname, msg_postmomnojob); req_reject(PBSE_UNKJOBID, 0, preq, NULL, msg_postmomnojob); } else { /* record that MOM has a checkpoint file */ account_record(PBS_ACCT_CHKPNT, pjob, "Checkpointed"); /* note in accounting file */ reply_ack(preq); } }
/*
 * req_reject - reject a batch request, building the error text from the
 * base message for `code` plus optional REJHOST and MSG suffixes, logging
 * the rejection, and sending the reply to the client.
 *
 *   code     - PBSE error code for the reply
 *   aux      - auxiliary code stored in the reply
 *   preq     - the request being rejected
 *   HostName - optional host to blame (appended as "REJHOST=...")
 *   Msg      - optional detail text (appended as "MSG=...")
 */
void req_reject(

  int                   code,      /* I */
  int                   aux,       /* I */
  struct batch_request *preq,      /* I */
  char                 *HostName,  /* I (optional) */
  char                 *Msg)       /* I (optional) */

  {
  char msgbuf[ERR_MSG_SIZE + 256 + 1];
  char msgbuf2[ERR_MSG_SIZE + 256 + 1];

  set_err_msg(code, msgbuf);

  /* msgbuf2 always holds a snapshot of msgbuf so each append below can
   * safely rebuild msgbuf from it (snprintf args must not overlap) */
  snprintf(msgbuf2, sizeof(msgbuf2), "%s", msgbuf);

  if ((HostName != NULL) && (*HostName != '\0'))
    {
    snprintf(msgbuf, sizeof(msgbuf), "%s REJHOST=%s", msgbuf2, HostName);

    snprintf(msgbuf2, sizeof(msgbuf2), "%s", msgbuf);
    }

  if ((Msg != NULL) && (*Msg != '\0'))
    {
    snprintf(msgbuf, sizeof(msgbuf), "%s MSG=%s", msgbuf2, Msg);

    /* NOTE:  Don't need this last snprintf() unless another message is concatenated. */
    }

  sprintf(log_buffer, "Reject reply code=%d(%s), aux=%d, type=%s, from %s@%s",
    code, msgbuf, aux, reqtype_to_txt(preq->rq_type), preq->rq_user, preq->rq_host);

  LOG_EVENT(
    PBSEVENT_DEBUG,
    PBS_EVENTCLASS_REQUEST,
    "req_reject",
    log_buffer);

  preq->rq_reply.brp_auxcode = aux;

  reply_text(preq, code, msgbuf);

  return;
  }  /* END req_reject() */
/// Register this agent with the heartbeat manager and record the
/// connection time.  Always succeeds and returns 0.
int HeartbeatAgent::init()
{
    /* Stamp the connection time so liveness checks have a baseline. */
    m_time_stamp = time(NULL);

    HBManager::getInstance()->add_heartbeat_agent(this);

    std::string peer = "Heartbeat connection from IP : " + TCPAgent::m_Addr.ip
                     + " AND Port : " + int_to_str(TCPAgent::m_Addr.port);
    LOG_EVENT(peer);

    return 0;
}
// Mark that this controller has answered the latest ping interrupt.
// Runs in signal-handler context, so it must stay async-signal-safe:
// the logging below is compiled out for exactly that reason.
void controller_t::interrupt_handled()
{
    the_group.ping_received[my_id] = true;
    //STAT_COUNT(INTERRUPT);
#if 0
    /*! \warning this logging event is bogus in a signal handler because
     * of the fact that log events are allocated by the freelist
     * allocator and the freelist allocator is not signal safe.
     */
    double elapsed = ticks::microseconds_since(controller->date_of_last_interrupt);
    LOG_EVENT(COMM, new util::logging::interrupt_event_t(elapsed));
#endif
}
/*
 * find_queuebyname - locate a queue by name, ignoring any "@server"
 * fragment in the supplied name.
 *
 * Returns the queue with its mutex LOCKED, or NULL if the queue does not
 * exist or is being recycled.  The all-queues mutex is held only for the
 * hash lookup; the queue lock is taken before it is released so the queue
 * cannot vanish in between.
 */
pbs_queue *find_queuebyname(

  char *quename) /* I */

  {
  char      *pc;
  pbs_queue *pque = NULL;
  char       qname[PBS_MAXDEST + 1];
  char       log_buf[LOCAL_LOG_BUF_SIZE+1];
  int        i;

  /* work on a bounded local copy of the name */
  snprintf(qname, sizeof(qname), "%s", quename);

  if (LOGLEVEL >= 7)
    {
    sprintf(log_buf, "%s", quename);
    LOG_EVENT(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, log_buf);
    }

  pc = strchr(qname, (int)'@'); /* strip off server (fragment) */

  if (pc != NULL)
    *pc = '\0';

  lock_allques_mutex(&svr_queues, __func__, NULL, LOGLEVEL);

  i = get_value_hash(svr_queues.ht,qname);

  if (i >= 0)
    {
    pque = svr_queues.ra->slots[i].item;
    }

  /* lock the queue while still holding the all-queues mutex */
  if (pque != NULL)
    lock_queue(pque, __func__, NULL, LOGLEVEL);

  unlock_allques_mutex(&svr_queues, __func__, NULL, LOGLEVEL);

  if (pque != NULL)
    {
    /* a recycled queue is as good as gone -- drop the lock and report miss */
    if (pque->q_being_recycled != FALSE)
      {
      unlock_queue(pque, __func__, "recycled queue", LOGLEVEL);

      pque = NULL;
      }
    }

  if (pc != NULL)
    *pc = '@'; /* restore '@' server portion */

  return(pque);
  }  /* END find_queuebyname() */
/* Free a scheduler that is no longer in use.
 *
 * Releases each of the scheduler's pools (freeing automatic pools whose
 * last reference this was), frees the associated work unit (ULT or
 * tasklet), invokes the scheduler's own free callback, and finally frees
 * the scheduler structure.  Fails with ABT_ERR_SCHED if the scheduler is
 * still in use. */
int ABTI_sched_free(ABTI_sched *p_sched)
{
    int abt_errno = ABT_SUCCESS;
    int p;

    /* If sched is currently used, free is not allowed. */
    if (p_sched->used != ABTI_SCHED_NOT_USED) {
        abt_errno = ABT_ERR_SCHED;
        goto fn_fail;
    }

    /* If sched is a default provided one, it should free its pool here.
     * Otherwise, freeing the pool is the user's reponsibility. */
    for (p = 0; p < p_sched->num_pools; p++) {
        ABTI_pool *p_pool = ABTI_pool_get_ptr(p_sched->pools[p]);
        int32_t num_scheds = ABTI_pool_release(p_pool);
        /* only free a pool automatically when no scheduler references it */
        if (p_pool->automatic == ABT_TRUE && num_scheds == 0) {
            abt_errno = ABT_pool_free(p_sched->pools+p);
            ABTI_CHECK_ERROR(abt_errno);
        }
    }
    ABTU_free(p_sched->pools);

    /* Free the associated work unit */
    if (p_sched->type == ABT_SCHED_TYPE_ULT) {
        if (p_sched->p_thread) {
            /* the main scheduler's ULT needs its dedicated free path */
            if (p_sched->p_thread->type == ABTI_THREAD_TYPE_MAIN_SCHED) {
                ABTI_thread_free_main_sched(p_sched->p_thread);
            } else {
                ABTI_thread_free(p_sched->p_thread);
            }
        }
    } else if (p_sched->type == ABT_SCHED_TYPE_TASK) {
        if (p_sched->p_task) {
            ABTI_task_free(p_sched->p_task);
        }
    }

    LOG_EVENT("[S%" PRIu64 "] freed\n", p_sched->id);

    /* let the scheduler implementation release its own data first */
    p_sched->free(ABTI_sched_get_handle(p_sched));
    p_sched->data = NULL;

    ABTU_free(p_sched);

  fn_exit:
    return abt_errno;

  fn_fail:
    HANDLE_ERROR_FUNC_WITH_CODE(abt_errno);
    goto fn_exit;
}
/*!
******************************************************************************
 @Function              PALLOC_AttachToConnection
******************************************************************************/
IMG_RESULT PALLOC_AttachToConnection(
    IMG_UINT32          ui32ConnId,
    IMG_UINT32 __user * pui32AttachId
)
{
    IMG_HANDLE hConn;
    IMG_HANDLE hDev;
    IMG_UINT32 ui32AttachId;
    IMG_UINT32 ui32Result;

    LOG_EVENT(PALLOC, PALLOC_ATTACH, (LOG_FLAG_START), 0, 0);

    /* Resolve the connection handle from its id. */
    ui32Result = DMANKM_GetConnHandleFromId(ui32ConnId, &hConn);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    if (ui32Result != IMG_SUCCESS)
    {
        return ui32Result;
    }

    /* Obtain the owning device from the connection. */
    hDev = DMANKM_GetDevHandleFromConn(hConn);

    /* Attach the component while holding the device lock. */
    DMANKM_LockDeviceContext(hDev);
    ui32Result = DMANKM_AttachComponent(hConn, "PALLOCBRG", palloc_fnCompAttach, IMG_NULL, &ui32AttachId);
    IMG_ASSERT(ui32Result == IMG_SUCCESS);
    DMANKM_UnlockDeviceContext(hDev);

    /* Hand the attachment id back to user space. */
    SYSOSKM_CopyToUser(pui32AttachId, &ui32AttachId, sizeof(ui32AttachId));

    LOG_EVENT(PALLOC, PALLOC_ATTACH, (LOG_FLAG_END), 0, 0);

    return ui32Result;
}
/*
 * remove_job - remove a job from an all_jobs container.
 *
 * The caller holds the job's mutex.  Lock ordering requires the container
 * mutex to be acquired before the job mutex, so if the container mutex
 * cannot be taken immediately the job mutex is dropped, the locks are
 * re-acquired in the correct order, and the job is re-checked for
 * recycling in the window where it was unlocked.
 *
 * Returns PBSE_NONE on success, THING_NOT_FOUND if the job is not in the
 * container, PBSE_JOB_RECYCLED if it was recycled while unlocked, or
 * PBSE_BAD_PARAMETER on NULL input.
 */
int remove_job(

  struct all_jobs *aj,
  job             *pjob)

  {
  int rc = PBSE_NONE;
  int index;

  if (pjob == NULL)
    {
    rc = PBSE_BAD_PARAMETER;
    log_err(rc,__func__,"null input job pointer fail");
    return(rc);
    }

  if (aj == NULL)
    {
    rc = PBSE_BAD_PARAMETER;
    log_err(rc,__func__,"null input array pointer fail");
    return(rc);
    }

  if (LOGLEVEL >= 10)
    LOG_EVENT(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, pjob->ji_qs.ji_jobid);

  /* deadlock avoidance: if the container mutex is contended, release the
   * job mutex and re-acquire both in container-then-job order */
  if (pthread_mutex_trylock(aj->alljobs_mutex))
    {
    unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);
    pthread_mutex_lock(aj->alljobs_mutex);
    lock_ji_mutex(pjob, __func__, NULL, LOGLEVEL);

    /* the job may have been recycled while it was unlocked */
    if (pjob->ji_being_recycled == TRUE)
      {
      pthread_mutex_unlock(aj->alljobs_mutex);
      unlock_ji_mutex(pjob, __func__, "1", LOGLEVEL);
      return(PBSE_JOB_RECYCLED);
      }
    }

  /* drop the job from both the index array and the hash table */
  if ((index = get_value_hash(aj->ht,pjob->ji_qs.ji_jobid)) < 0)
    rc = THING_NOT_FOUND;
  else
    {
    remove_thing_from_index(aj->ra,index);
    remove_hash(aj->ht,pjob->ji_qs.ji_jobid);
    }

  pthread_mutex_unlock(aj->alljobs_mutex);

  return(rc);
  }  /* END remove_job() */
int HeartbeatAgent::destory() { LOG_EVENT("Heartbeat Closed from IP " + TCPAgent::m_Addr.ip + " AND Port : "+ ushort_to_str(TCPAgent::m_Addr.port) + " Node type is " + type_str[m_type]); if(TCPAgent::recycler() < 0) { LOG_ERROR("HeartbeatAgent::recycler agent error !"); return -1; } return 0; }
/*
 * close_quejob_by_jobid - finish handling a newly-arriving job whose
 * connection has closed.
 *
 * If the job never completed its inter-server transfer it is purged.
 * If this server originated it (JOB_SVFLG_HERE) it is moved from the
 * newjobs list into its queue in the QUEUED state; failures to enqueue
 * abort the job.  The job mutex is managed through mutex_mgr, whose
 * unlock-on-exit is suppressed whenever the job pointer has been handed
 * off or destroyed.
 *
 * Returns PBSE_NONE on success or a PBSE_* error code.
 */
int close_quejob_by_jobid(

  char *job_id)

  {
  int   rc = PBSE_NONE;
  job  *pjob = NULL;

  if (LOGLEVEL >= 10)
    {
    LOG_EVENT(PBSEVENT_JOB, PBS_EVENTCLASS_JOB, __func__, job_id);
    }

  if ((pjob = svr_find_job(job_id, FALSE)) == NULL)
    {
    rc = PBSE_JOBNOTFOUND;
    return(rc);
    }

  /* svr_find_job returns the job locked; mutex_mgr releases it on scope exit */
  mutex_mgr pjob_mutex = mutex_mgr(pjob->ji_mutex, true);

  if (pjob->ji_qs.ji_substate != JOB_SUBSTATE_TRANSICM)
    {
    /* transfer never completed -- discard the job entirely */
    remove_job(&newjobs,pjob);
    svr_job_purge(pjob);
    pjob = NULL;
    }
  else if (pjob->ji_qs.ji_svrflags & JOB_SVFLG_HERE)
    {
    /* this server owns the job: move it from newjobs into its queue */
    remove_job(&newjobs,pjob);
    pjob->ji_qs.ji_state = JOB_STATE_QUEUED;
    pjob->ji_qs.ji_substate = JOB_SUBSTATE_QUEUED;
    rc = svr_enquejob(pjob, FALSE, -1, false);

    if ((rc == PBSE_JOBNOTFOUND) ||
        (rc == PBSE_JOB_RECYCLED))
      {
      /* enqueue already consumed/destroyed the job */
      pjob = NULL;
      }
    else if (rc != PBSE_NONE)
      {
      job_abt(&pjob, msg_err_noqueue);
      pjob = NULL;
      }
    }

  /* the job pointer is gone -- do not unlock a destroyed mutex */
  if (pjob == NULL)
    pjob_mutex.set_lock_on_exit(false);

  return(rc);
  }  /* close_quejob_by_jobid() */
//根据回复更新RS的规则,这里需要保证更新的原子性 int GetRuleTask::get_new_rule_ack(InReq *req) { MsgHeader header = req->m_msgHeader; std::string in_data(req->ioBuf , header.length); //如果RS已经是最新的规则了,那么这里就会返回空的负载数据 if(0 == header.length) { LOG_EVENT("Get new rule from CS back , empty load data !"); return 0; } cstore::pb_MSG_RS_CS_UPDATE_MU_HASH_ACK in_mu_ack; cstore::pb_MSG_RS_CS_UPDATE_SU_HASH_ACK in_su_ack; int sys_order_number = 0; std::list<cstore::Sys_Order> sys_order_list; //首先将所有的版本更新的信息保存在list中,再一一更新 if(MU_RULER == m_type) { if(!in_mu_ack.ParseFromString(in_data)) { LOG_ERROR("GetRuleTask::parse mu ack from protbuf error !"); return -1; } sys_order_number = in_mu_ack.sys_order_size(); for(int i = 0 ; i < sys_order_number ; ++ i) { sys_order_list.push_back(in_mu_ack.sys_order(i)); } } else { if(!in_su_ack.ParseFromString(in_data)) { LOG_ERROR("GetRuleTask::parse su ack from protbuf error !"); return -1; } sys_order_number = in_su_ack.sys_order_size(); for(int i = 0 ; i < sys_order_number ; ++ i) { sys_order_list.push_back(in_su_ack.sys_order(i)); } } apply_all_new_rule(sys_order_list); return 0; }
int HeartbeatAgent::recycler() { HBManager::getInstance()->heartbeat_closed(this); LOG_EVENT("Heartbeat Closed from IP " + TCPAgent::m_Addr.ip + " AND Port : "+ ushort_to_str(TCPAgent::m_Addr.port) + " Node type is " + type_str[m_type]); if(TCPAgent::recycler() < 0) { LOG_ERROR("HeartbeatAgent::recycler agent error !"); return -1; } return 0; }
// Handle Ctrl-Z (SIGTSTP): after letting the OS layer prepare for
// suspension, log the event and stop the whole process group.  Any other
// signal is treated as a resume.  Returns 0 in all cases.
static int32 ctrl_z_handler(int sig)
{
  // Skip if the signal was already forwarded to the dedicated self thread.
  if (InterruptedContext::the_interrupted_context->forwarded_to_self_thread(sig))
    return 0;

  FlagSettingInt fs(errno, 0); // save errno

  const bool suspending = (sig == SIGTSTP);
  OS::handle_suspend_and_resume(suspending);

  if (suspending) {
    LOG_EVENT("Control-Z");
    kill(0, SIGSTOP);
  }
  return 0;
}