/*
 * Search the utterance file for the record whose id matches ID, starting
 * at the file's current offset and wrapping around to the beginning so
 * that every record is examined exactly once.
 *
 * uf     - open utterance file; uf->off is the starting offset and
 *          uf->len the total number of records.
 * id     - utterance id to look for (compared with strcmp against
 *          id_of() of each record).
 * buf    - caller-supplied buffer; on TRUE it holds the matching record.
 * max_sz - capacity of buf, forwarded to uttfile_data_at().
 *
 * Returns TRUE if a matching record was found (record left in buf),
 * FALSE if no record matches, or S3_ERROR if any read fails.
 */
int
uttfile_data_for(uttfile_t *uf, const char *id, char *buf, uint32 max_sz)
{
    uint32 i, idx, start;

    start = uf->off;
    /* Single wrap-around pass: idx visits start..len-1 then 0..start-1,
     * which is exactly what the original pair of back-to-back loops did. */
    for (i = 0; i < uf->len; i++) {
        idx = (start + i) % uf->len;
        if (uttfile_data_at(uf, idx, buf, max_sz) != S3_SUCCESS) {
            return S3_ERROR;
        }
        if (strcmp(id_of(buf), id) == 0) {
            return TRUE;
        }
    }
    return FALSE;
}
// Writes the type header for an object: a one-byte flag followed by
// either the type's registered numeric id (flag 0) or its full name
// as a string (flag 1, used when the type is not in the outgoing table).
void binary_serializer::begin_object(const uniform_type_info* uti) {
    CPPA_REQUIRE(uti != nullptr);
    // Resolve the numeric type id; 0 means "unknown / no table available".
    auto types = outgoing_types();
    std::uint32_t type_id = 0;
    if (types) {
        type_id = types->id_of(uti);
    }
    if (type_id == 0) {
        // Unknown type: emit flag 1 and serialize the type by name.
        binary_writer::write_int(m_sink, std::uint8_t{1});
        binary_writer::write_string(m_sink, uti->name());
    }
    else {
        // Known type: emit flag 0 and the compact numeric id.
        binary_writer::write_int(m_sink, std::uint8_t{0});
        binary_writer::write_int(m_sink, type_id);
    }
}
void run() { CAF_SET_LOGGER_SYS(&system()); CAF_LOG_TRACE(CAF_ARG(id_)); // scheduling loop for (;;) { auto job = policy_.dequeue(this); CAF_ASSERT(job != nullptr); CAF_ASSERT(job->subtype() != resumable::io_actor); CAF_LOG_DEBUG("resume actor:" << CAF_ARG(id_of(job))); CAF_PUSH_AID_FROM_PTR(dynamic_cast<abstract_actor*>(job)); policy_.before_resume(this, job); auto res = job->resume(this, max_throughput_); policy_.after_resume(this, job); switch (res) { case resumable::resume_later: { // keep reference to this actor, as it remains in the "loop" policy_.resume_job_later(this, job); break; } case resumable::done: { policy_.after_completion(this, job); intrusive_ptr_release(job); break; } case resumable::awaiting_message: { // resumable will maybe be enqueued again later, deref it for now intrusive_ptr_release(job); break; } case resumable::shutdown_execution_unit: { policy_.after_completion(this, job); policy_.before_shutdown(this); return; } } } }
void run() { CAF_LOG_TRACE("worker with ID " << m_id); // scheduling loop for (;;) { auto job = m_queue_policy.internal_dequeue(this); CAF_REQUIRE(job != nullptr); CAF_LOG_DEBUG("resume actor " << id_of(job)); CAF_PUSH_AID_FROM_PTR(dynamic_cast<abstract_actor*>(job)); switch (job->resume(this)) { case resumable::done: { job->detach_from_scheduler(); break; } case resumable::resume_later: { break; } case resumable::shutdown_execution_unit: { m_queue_policy.clear_internal_queue(this); return; } } m_queue_policy.assert_stealable(this); } }
/*
 * _delvec: destroy a Data_Obj and release everything it owns.
 *
 * Teardown order (each step visible below): delete the exported
 * identifier if any, delete sub-objects and detach from a parent,
 * short-circuit for temp objects, release owned data and the shape,
 * free the declaration-file string, and finally remove (or, with
 * ZOMBIE_SUPPORT, recycle) the item itself.  With ZOMBIE_SUPPORT,
 * static data-owning objects and objects with a positive refcount
 * are converted to zombies instead of being destroyed.
 */
void _delvec(QSP_ARG_DECL Data_Obj *dp)
{
	/* Caller must pass a live, named object. */
	assert(dp!=NULL);
	assert(OBJ_NAME(dp)!=NULL);

#ifdef ZOMBIE_SUPPORT
	// This should go back in eventually!
	/* Static data-owning objects are not destroyed; they become zombies. */
	if( OBJ_FLAGS(dp) & DT_STATIC && OWNS_DATA(dp) ){
		sprintf(ERROR_STRING,"delvec: static object %s will be made a zombie",OBJ_NAME(dp));
		advise(ERROR_STRING);
		make_zombie(dp);
		return;
	}
	/* Referenced objects are likewise kept around as zombies. */
	if( OBJ_REFCOUNT(dp) > 0 ){
		/* This zombie business was introduced at a time
		 * when a displayed image had to be kept around
		 * to refresh its window... with the current
		 * X windows implementation of viewers that is
		 * no longer the case, so this may be unecessary...
		 *
		 * But another case arises when we have a static
		 * object in the expression language, that gets
		 * deleted outside of the expression language.
		 * This shouldn't be done, but we don't want to
		 * be able to crash the program either...
		 */
		sprintf(ERROR_STRING,"delvec: object %s (refcount = %d) will be made a zombie",OBJ_NAME(dp),dp->dt_refcount);
		advise(ERROR_STRING);
		make_zombie(dp);
		return;
	}
#endif /* ZOMBIE_SUPPORT */

	// If the object has been exported, we need to delete
	// the associated identifier...
	//
	// BUT if it was exported, then it may be referenced!?
	// So it should be static...
	//
	// If we have references, and are therefore keeping the
	// object as a zombie, then we don't want to delete the
	// identifier, and we probably don't want to change the
	// object's name...
	if( IS_EXPORTED(dp) ){
		Identifier *idp;
		idp = id_of(OBJ_NAME(dp));
		assert( idp != NULL );
		delete_id((Item *)idp);
	}

	/* Delete sub-objects first, then detach from any parent. */
	if( OBJ_CHILDREN( dp ) != NULL ){
		delete_subobjects(dp);
	}
	if( OBJ_PARENT(dp) != NULL ){
		disown_child(dp);
	}

	if( IS_TEMP(dp) ){
		/* NOTE(review): a temp object is not expected to carry a
		 * declaration file; warn (but continue) if it does. */
		if( OBJ_DECLFILE(dp) != NULL ){
			sprintf(ERROR_STRING,"delvec %s: temp object has declfile %s!?\n", OBJ_NAME(dp),OBJ_DECLFILE(dp));
			advise(ERROR_STRING);
		}
		release_tmp_obj(dp);
		/*
		 * Most likely called when parent is deleted.
		 * Temp objects are not hashed, and are not dynamically
		 * allocated.
		 *
		 * Simply mark as free by clearing name field.
		 */
		return;
	}

	// We might clean this up by making the release
	// function a platform member...
	if( OWNS_DATA(dp) ){
		if( ! UNKNOWN_SHAPE(OBJ_SHAPE(dp)) ){
			release_data(dp);
		}
	}
	// In the first OpenCL implementation, we used subbuffers, which had
	// to be released here even for subimages. But now we handle subobjects
	// ourselves, managing offsets, so non-data-owners don't need to release.

	rls_shape( OBJ_SHAPE(dp) );
	// We don't need to do this if we have garbage collection?

	/* The name might be null if we had an error creating the object... */
	if( OBJ_DECLFILE(dp) != NULL ){
		rls_str( OBJ_DECLFILE(dp) );
	}

#ifdef ZOMBIE_SUPPORT
	/* BUG context code assumes that this is really deleted... */
	// not sure I understand the above comment?
	if( IS_ZOMBIE(dp) ){
		// The object is a zombie that is no longer referenced...
		/* NOTE: we used to release the struct here with givbuf,
		 * but in the current implementation of the item package,
		 * objects aren't allocated with getbuf!
		 */
		// The object has already been removed from the dictionary,
		// so we don't need to call del_item...
		/* put this back on the free list... */
		recycle_item(dobj_itp,dp);
	} else {
		del_item(dobj_itp, dp );
	}
#else /* ! ZOMBIE_SUPPORT */
	DELETE_OBJ_ITEM(dp);	// del_dobj - item function
#endif /* ! ZOMBIE_SUPPORT */

	// used to release the name here
	// and set to null, but that is done in del_item
}
// Hook invoked when a job finished: run the wrapped policy's hook first,
// then tell the owning coordinator to drop its bookkeeping for the job.
void after_completion(Worker* worker, resumable* job) {
    Policy::after_completion(worker, job);
    auto coord = static_cast<coordinator_type*>(worker->parent());
    coord->remove_job(id_of(job));
}
// Hook invoked after a job was resumed: run the wrapped policy's hook,
// then stop the coordinator's measurement for this worker/job pair.
void after_resume(Worker* worker, resumable* job) {
    Policy::after_resume(worker, job);
    auto coord = static_cast<coordinator_type*>(worker->parent());
    coord->stop_measuring(worker->id(), id_of(job));
}
/*
 * _dump_node_basic: print a human-readable description of one parse-tree
 * node to the message stream.
 *
 * The line contains: the node "name" (via prt_node), optional LHS-ref /
 * cost columns controlled by the SHOWING_* flags, a per-opcode detail
 * field (object/function/string/literal, switched on VN_CODE), the serial
 * numbers of all non-null children, and — again flag-controlled — the
 * node's shape and its resolver list.  A NULL enp is silently ignored.
 */
static void _dump_node_basic(QSP_ARG_DECL Vec_Expr_Node *enp)
{
	Tree_Code code;
	int i;
	const char *s;

	if( enp==NULL ) return;

	/* print the node "name", and a code that tells about shape knowledge */
	// Temporarily print to stderr instead of stdout for debugging...
	prt_node(enp,msg_str);
	prt_msg_frag(msg_str);

	if( SHOWING_LHS_REFS ){
		sprintf(msg_str,"\t%d",VN_LHS_REFS(enp));
		prt_msg_frag(msg_str);
	}
	if( SHOWING_COST ){
		if( VN_SHAPE(enp) != NULL ){
			sprintf(msg_str,"\t%d", SHP_N_MACH_ELTS(VN_SHAPE(enp)));
		}
		/* NOTE(review): when VN_SHAPE(enp) is NULL, the previous contents
		 * of msg_str are re-printed here — confirm this is intentional. */
		prt_msg_frag(msg_str);
		sprintf(msg_str,"\t%d\t%d", VN_FLOPS(enp),VN_N_MATH(enp));
		prt_msg_frag(msg_str);
	}

	/* Curdled (error) nodes get a short line and nothing else. */
	if( IS_CURDLED(enp) ){
		sprintf(msg_str,"\t%s (curdled!?)", NNAME(enp));
		prt_msg(msg_str);
		return;
	}

	sprintf(msg_str,"\t%s", NNAME(enp));
	prt_msg_frag(msg_str);

	/* print the special op-dependent args in human-readable form */
	code = VN_CODE(enp);
	if( code==T_DYN_OBJ || code == T_UNDEF || code == T_PROTO || code==T_POINTER || code==T_FUNCPTR || code==T_STR_PTR ){
		sprintf(msg_str,"\t%s",VN_STRING(enp));
		prt_msg_frag(msg_str);
		if( code == T_POINTER ){
			Identifier *idp;
			/* We don't use get_set_ptr() here because we don't want an error msg... */
			idp = id_of(VN_STRING(enp));
			if( idp != NULL && IS_POINTER(idp) && POINTER_IS_SET(idp) ){
				if( PTR_REF(ID_PTR(idp)) == NULL ){
					/* how could this ever happen??? */
					prt_msg_frag("->???");
				} else {
					Data_Obj *dp;
					dp = REF_OBJ(PTR_REF(ID_PTR(idp)));
					/* show what the pointer currently points to */
					sprintf(msg_str,"->%s",OBJ_NAME(dp));
					prt_msg_frag(msg_str);
				}
			}
		}
	} else if( code == T_STATIC_OBJ ){
		sprintf(msg_str,"\t%s",OBJ_NAME(VN_OBJ(enp)));
		prt_msg_frag(msg_str);
#ifdef SCALARS_NOT_OBJECTS
	} else if( code == T_SCALAR_VAR ){
		sprintf(msg_str,"\t%s",VN_STRING(enp));
		prt_msg_frag(msg_str);
#endif // SCALARS_NOT_OBJECTS
	} else if ( code == T_FUNCREF ){
		Subrt *srp;
		srp=VN_SUBRT(enp);
		sprintf(msg_str,"\t%s",SR_NAME(srp));
		prt_msg_frag(msg_str);
	} else if( code == T_SIZE_FN ){
		sprintf(msg_str,"\t%s",FUNC_NAME(VN_FUNC_PTR(enp)));
		prt_msg_frag(msg_str);
	}
#ifdef NOT_YET
	else if(code == T_CALL_NATIVE ){
		// was kw_token???
		// curr_native_func_tbl...
		sprintf(msg_str,"\t%s",FUNC_NAME(VN_FUNC_PTR(enp)));
		prt_msg_frag(msg_str);
	}
#endif /* NOT_YET */
	else if(code == T_TYPECAST ){
		// BUG not how we do precision any more!!!
		//sprintf(msg_str," %s",NAME_FOR_PREC_CODE(VN_INTVAL(enp)));
		if( VN_SHAPE(enp) == NULL )
			error1("CAUTIOUS: null node shape for typecast node!?");
		else {
			sprintf(msg_str," %s",PREC_NAME(VN_PREC_PTR(enp)));
			prt_msg_frag(msg_str);
		}
	} else if( code == T_SUBRT_DECL || code == T_SCRIPT ){
		Subrt *srp;
		srp=VN_SUBRT(enp);
		sprintf(msg_str,"\t%s",SR_NAME(srp));
		prt_msg_frag(msg_str);
	} else if( code==T_DECL_STAT ){
		//sprintf(msg_str," %s",NAME_FOR_PREC_CODE(VN_INTVAL(enp)));
		sprintf(msg_str," %s",PREC_NAME(VN_DECL_PREC(enp)));
		prt_msg_frag(msg_str);
	} else if( IS_DECL(code) ){
		sprintf(msg_str," %s",VN_STRING(enp));
		prt_msg_frag(msg_str);
	} else if( code==T_ADVISE ){
		/* BUG need to elim yylex_qsp */
		s=eval_string(VN_CHILD(enp,0));
		sprintf(msg_str,"\t\"%s\"",s);
		prt_msg_frag(msg_str);
	} else if( code==T_WARN ){
		/* BUG need to elim yylex_qsp */
		s=eval_string(VN_CHILD(enp,0));
		sprintf(msg_str,"\t\"%s\"",s);
		prt_msg_frag(msg_str);
	} else if( code==T_STRING ){
		sprintf(msg_str,"\t\"%s\"",VN_STRING(enp));
		prt_msg_frag(msg_str);
	} else if( code == T_LABEL || code ==T_GO_BACK || code == T_GO_FWD ){
		sprintf(msg_str," %s",VN_STRING(enp));
		prt_msg_frag(msg_str);
	} else if( code==T_LIT_DBL ){
		sprintf(msg_str," %g",VN_DBLVAL(enp));
		prt_msg_frag(msg_str);
	} else if( code == T_MATH0_FN ){
		sprintf(msg_str," %s",FUNC_NAME(VN_FUNC_PTR(enp)));
		prt_msg_frag(msg_str);
	} else if( code == T_MATH1_FN ){
		sprintf(msg_str," %s",FUNC_NAME(VN_FUNC_PTR(enp)));
		prt_msg_frag(msg_str);
	} else if( code == T_MATH2_FN ){
		sprintf(msg_str," %s",FUNC_NAME(VN_FUNC_PTR(enp)));
		prt_msg_frag(msg_str);
	} else if ( code == T_MATH0_VFN || code == T_MATH1_VFN || code == T_MATH2_VFN || code == T_MATH2_VSFN || code == T_CHAR_VFN /* BUG? shouldn't there bre a VSFN2 ??? */ || code == T_VS_FUNC || code == T_VV_FUNC ){
		sprintf(msg_str," %s",VF_NAME(FIND_VEC_FUNC(VN_VFUNC_CODE(enp))));
		prt_msg_frag(msg_str);
	} else if( code==T_CALLFUNC ){
		assert(VN_SUBRT(enp)!=NULL);
		sprintf(msg_str," %s", SR_NAME(VN_SUBRT(enp)));
		prt_msg_frag(msg_str);
	} else if( code==T_LIT_INT ){
		sprintf(msg_str," %"PRId64, VN_INTVAL(enp) );
		prt_msg_frag(msg_str);
	} else if( code==T_ASSIGN ){
		prt_msg_frag("\t");
	} else if( code==T_MAXVAL ){
		prt_msg_frag("\t");
	} else if( code==T_MINVAL ){
		prt_msg_frag("\t");
	} else if( code==T_RAMP ){
		prt_msg_frag("\t");
	}

	/* Now print the addresses of the child nodes */
	if( VN_CHILD(enp,0)!=NULL){
		sprintf(msg_str,"\t\tn%d",VN_SERIAL(VN_CHILD(enp,0)));
		prt_msg_frag(msg_str);
	}
	for(i=1;i<MAX_CHILDREN(enp);i++){
		if( VN_CHILD(enp,i)!=NULL){
			sprintf(msg_str,", n%d",VN_SERIAL(VN_CHILD(enp,i)));
			prt_msg_frag(msg_str);
		}
	}
	prt_msg("");

	if( SHOWING_SHAPES && VN_SHAPE(enp) != NULL ){
		prt_msg_frag("\t");
		/* '*' marks a shape owned by this node, '@' a borrowed one. */
		if( OWNS_SHAPE(enp) ){
			sprintf(msg_str,"* 0x%lx ",(u_long)VN_SHAPE(enp));
			prt_msg_frag(msg_str);
		} else {
			sprintf(msg_str,"@ 0x%lx ",(u_long)VN_SHAPE(enp));
			prt_msg_frag(msg_str);
		}
		prt_msg_frag("\t");
		describe_shape(VN_SHAPE(enp));
	}

	if( SHOWING_RESOLVERS && VN_RESOLVERS(enp)!=NULL ){
		Node *np;
		Vec_Expr_Node *enp2;

		/* List every node that can resolve this one's shape. */
		prt_msg("\tResolvers:");
		np=QLIST_HEAD(VN_RESOLVERS(enp));
		while(np!=NULL){
			enp2=(Vec_Expr_Node *)NODE_DATA(np);
			sprintf(msg_str,"\t\t%s",node_desc(enp2));
			prt_msg(msg_str);
			np=NODE_NEXT(np);
		}
	}
}
// go on a raid in quest for a shiny new job job_ptr raid() { auto result = m_steal_policy.raid(this); CAF_LOG_DEBUG_IF(result, "got actor with id " << id_of(result)); return result; }
/**
 * Enqueues a new job to the worker's queue from an internal
 * source, i.e., a job that is currently executed by this worker.
 * @warning Must not be called from other threads.
 */
void exec_later(job_ptr new_job) override {
    CAF_REQUIRE(new_job != nullptr);
    CAF_LOG_TRACE("id = " << id() << " actor id " << id_of(new_job));
    m_queue_policy.internal_enqueue(this, new_job);
}
/**
 * Attempt to steal an element from the exposed job queue.
 */
job_ptr try_steal() override {
    job_ptr stolen = m_queue_policy.try_external_dequeue(this);
    CAF_LOG_DEBUG_IF(stolen, "stole actor with id " << id_of(stolen));
    return stolen;
}
/// Enqueues a new job to the worker's queue from an internal /// source, i.e., a job that is currently executed by this worker. /// @warning Must not be called from other threads. void exec_later(job_ptr job) override { CAF_ASSERT(job != nullptr); CAF_LOG_TRACE(CAF_ARG(id()) << CAF_ARG(id_of(job))); policy_.internal_enqueue(this, job); }
/// Enqueues a new job to the worker's queue from an external /// source, i.e., from any other thread. void external_enqueue(job_ptr job) { CAF_ASSERT(job != nullptr); CAF_LOG_TRACE(CAF_ARG(id()) << CAF_ARG(id_of(job))); policy_.external_enqueue(this, job); }