Boolean MessageQueueService::SendForget(Message *msg)
{
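   // Fire-and-forget send: route msg to its destination queue without
   // waiting for (or wiring up) any reply handling.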
   AsyncOpNode *op = 0;
   Uint32 mask = msg->getMask();

   if (mask & message_mask::ha_async)
   {
      op = (static_cast<AsyncMessage *>(msg))->op;
   }

   if (op == 0)
   {
      op = get_op();
      op->_request.insert_first(msg);
      if (mask & message_mask::ha_async)
      {
         (static_cast<AsyncMessage *>(msg))->op = op;
      }
   }
   op->_op_dest = MessageQueue::lookup(msg->dest);
   op->_flags |= ASYNC_OPFLAGS_FIRE_AND_FORGET;
   op->_flags &= ~(ASYNC_OPFLAGS_CALLBACK | ASYNC_OPFLAGS_SAFE_CALLBACK
       | ASYNC_OPFLAGS_SIMPLE_STATUS);
   op->_state &= ~ASYNC_OPSTATE_COMPLETE;
   if (op->_op_dest == 0)
   {
      op->release();
      return_op(op);
      return false;
   }

   // now see if the meta dispatcher will take it
   return _meta_dispatcher->route_async(op);
}
Boolean MessageQueueService::SendAsync(
    Message *msg,
    Uint32 destination,
    void (*callback)(Message *response, void *handle, void *parameter),
    void *handle,
    void *parameter)
{
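   // Asynchronous send: deliver msg to the destination queue and arrange for
   // callback(response, handle, parameter) to run when the response arrives.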
   if (msg == NULL)
      return false;
   if (callback == NULL)
      return SendForget(msg);
   AsyncOpNode *op = get_op();
   msg->dest = destination;
   if (NULL == (op->_op_dest = MessageQueue::lookup(msg->dest)))
   {
      op->release();
      return_op(op);
      return false;
   }
   op->_flags |= ASYNC_OPFLAGS_SAFE_CALLBACK;
   op->_flags &= ~(ASYNC_OPFLAGS_FIRE_AND_FORGET);
   op->_state &= ~ASYNC_OPSTATE_COMPLETE;
   op->__async_callback = callback;
   op->_callback_node = op;
   op->_callback_handle = handle;
   op->_callback_parameter = parameter;
   op->_callback_response_q = this;

   if (!(msg->getMask() & message_mask::ha_async))
   {
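      // Wrap the legacy (non-async) message for transport. The wrapper
      // registers itself with op in its constructor, so the otherwise-unused
      // local pointer is intentional, not a leak.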
      AsyncLegacyOperationStart *wrapper = new AsyncLegacyOperationStart(
         get_next_xid(),
         op,
         destination,
         msg,
         destination);
   }
   else
   {
      op->_request.insert_first(msg);
      (static_cast<AsyncMessage *>(msg))->op = op;
   }
   return _meta_dispatcher->route_async(op);
}
void MessageQueueService::_handle_incoming_operation(AsyncOpNode *operation)
{
   if (operation != 0)
   {

// ATTN: optimization
// << Tue Feb 19 14:10:38 2002 mdd >>
      operation->lock();

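      // Peek at the first queued request without removing it from the op node.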
      Message *rq = operation->_request.next(0);

// optimization <<< Thu Mar  7 21:04:05 2002 mdd >>>
// move this to the bottom of the loop when the majority of
// messages become async messages.

      // divert legacy messages to handleEnqueue
      if ((rq != 0) && (!(rq->getMask() & message_mask::ha_async)))
      {
         rq = operation->_request.remove_first();
         operation->unlock();
         // delete the op node
         operation->release();
         return_op(operation);

         handleEnqueue(rq);
         return;
      }

      if ((operation->_flags & ASYNC_OPFLAGS_CALLBACK ||
           operation->_flags & ASYNC_OPFLAGS_SAFE_CALLBACK) &&
          (operation->_state & ASYNC_OPSTATE_COMPLETE))
      {
         operation->unlock();
         _handle_async_callback(operation);
      }
      else
      {
         PEGASUS_ASSERT(rq != 0);
         operation->unlock();
         _handle_async_request(static_cast<AsyncRequest *>(rq));
      }
   }
   return;
}
Example #4
void test_service::_handle_incoming_operation(AsyncOpNode *operation)
{
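   // Test-service variant: dispatch async requests, discard everything else.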
   if (operation != 0)
   {
      Message *rq = operation->get_request();

      PEGASUS_ASSERT(rq != 0);
      if (rq && (rq->getMask() & message_mask::ha_async))
      {
         _handle_async_request(static_cast<AsyncRequest *>(rq));
      }
      else
      {
         delete rq;
         operation->release();
         return_op(operation);
      }
   }
   }
   return;
}
AsyncReply *MessageQueueService::SendWait(AsyncRequest *request)
{
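   // Synchronous send: issue the request asynchronously, then block on the
   // op node's client semaphore until the reply has been queued.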
   if (request == 0)
      return 0;

   Boolean destroy_op = false;

   if (request->op == 0)
   {
      request->op = get_op();
      request->op->_request.insert_first(request);
      destroy_op = true;
   }

   request->block = false;
   request->op->_flags |= ASYNC_OPFLAGS_PSEUDO_CALLBACK;
   SendAsync(
      request->op,
      request->dest,
      _sendwait_callback,
      this,
      (void *)0);

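   // _sendwait_callback signals _client_sem once the response is posted.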
   request->op->_client_sem.wait();

   request->op->lock();
   AsyncReply *rpl =
      static_cast<AsyncReply *>(request->op->_response.remove_first());
   rpl->op = 0;
   request->op->unlock();

   if (destroy_op == true)
   {
      request->op->lock();
      request->op->_request.remove(request);
      request->op->_state |= ASYNC_OPSTATE_RELEASED;
      request->op->unlock();
      return_op(request->op);
      request->op = 0;
   }
   return rpl;
}
void LIR_Assembler::emit_op1(LIR_Op1* op) {
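    // Code emission for single-operand LIR instructions: dispatch on the
    // opcode and hand off to the matching assembler helper.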
    switch (op->code()) {
    case lir_move:
        if (op->move_kind() == lir_move_volatile) {
            assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
            volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
        } else {
            move_op(op->in_opr(), op->result_opr(), op->type(),
                    op->patch_code(), op->info(), op->pop_fpu_stack(),
                    op->move_kind() == lir_move_unaligned,
                    op->move_kind() == lir_move_wide);
        }
        break;

    case lir_roundfp: {
        LIR_OpRoundFP* round_op = op->as_OpRoundFP();
        roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
        break;
    }

    case lir_return:
        return_op(op->in_opr());
        break;

    case lir_safepoint:
        if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
            _masm->nop();
        }
        safepoint_poll(op->in_opr(), op->info());
        break;

    case lir_fxch:
        fxch(op->in_opr()->as_jint());
        break;

    case lir_fld:
        fld(op->in_opr()->as_jint());
        break;

    case lir_ffree:
        ffree(op->in_opr()->as_jint());
        break;

    case lir_branch:
        break;

    case lir_push:
        push(op->in_opr());
        break;

    case lir_pop:
        pop(op->in_opr());
        break;

    case lir_neg:
        negate(op->in_opr(), op->result_opr());
        break;

    case lir_leal:
        leal(op->in_opr(), op->result_opr());
        break;

    case lir_null_check:
        if (GenerateCompilerNullChecks) {
            ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

            if (op->in_opr()->is_single_cpu()) {
                _masm->null_check(op->in_opr()->as_register(), stub->entry());
            } else {
                Unimplemented();
            }
        }
        break;

    case lir_monaddr:
        monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
        break;

#ifdef SPARC
    case lir_pack64:
        pack64(op->in_opr(), op->result_opr());
        break;

    case lir_unpack64:
        unpack64(op->in_opr(), op->result_opr());
        break;
#endif

    case lir_unwind:
        unwind_op(op->in_opr());
        break;

    default:
        Unimplemented();
        break;
    }
}
Example #7
void eval(BOOLEAN do_gc)
{
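  // One dispatch step of the VM: examine the opcode of the current
  // expression and execute the corresponding operation.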
  static unsigned int count = 0;

  OBJECT_PTR exp = car(reg_next_expression);

  OBJECT_PTR opcode = car(exp);

  pin_globals();

  if(do_gc)
  {
    count++;

    if(count == GC_FREQUENCY)
    {
      gc(false, true);
      count = 0;
    }
  }

  if(opcode == APPLY && profiling_in_progress)
  {
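    // Profiling: charge the wall time, CPU time, and memory allocated since
    // the previous APPLY to the operator that was executing.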
    last_operator = reg_accumulator;

    if(prev_operator != NIL)
    {
      OBJECT_PTR operator_to_be_used;

      hashtable_entry_t *e;

      unsigned int count;  // note: shadows the file-static 'count' above
      unsigned int mem_alloc;
      double elapsed_wall_time;
      double elapsed_cpu_time;

      double temp1 = get_wall_time();
      clock_t temp2 = clock();
      unsigned int temp3 = memory_allocated();

      profiling_datum_t *pd = (profiling_datum_t *)malloc(sizeof(profiling_datum_t));

      if(IS_SYMBOL_OBJECT(prev_operator))
         operator_to_be_used = prev_operator;
      else
      {
        OBJECT_PTR res = get_symbol_from_value(prev_operator, reg_current_env);
        if(car(res) != NIL)
          operator_to_be_used = cdr(res);
        else
          operator_to_be_used = cons(LAMBDA,
                                     cons(get_params_object(prev_operator),
                                          cons(car(get_source_object(prev_operator)), NIL)));
      }

      e = hashtable_get(profiling_tab, (void *)operator_to_be_used);

      if(e)
      {
        profiling_datum_t *pd = (profiling_datum_t *)e->value;  // shadows the outer 'pd'

        count = pd->count + 1;

        elapsed_wall_time = pd->elapsed_wall_time + temp1 - wall_time_var;
        elapsed_cpu_time = pd->elapsed_cpu_time + (temp2 - cpu_time_var) * 1.0 / CLOCKS_PER_SEC;
      
        mem_alloc = pd->mem_allocated + temp3 - mem_alloc_var;

        hashtable_remove(profiling_tab, (void *)operator_to_be_used);
        free(pd);
      }
      else
      {
        count = 1;
        elapsed_wall_time = temp1 - wall_time_var;
        elapsed_cpu_time = (temp2 - cpu_time_var) * 1.0 / CLOCKS_PER_SEC;
        mem_alloc = temp3 - mem_alloc_var;
      }

      pd->count = count;
      pd->elapsed_wall_time = elapsed_wall_time;
      pd->elapsed_cpu_time = elapsed_cpu_time;
      pd->mem_allocated = mem_alloc;

      hashtable_put(profiling_tab, (void *)operator_to_be_used, (void *)pd);
    }

    wall_time_var = get_wall_time();
    cpu_time_var = clock();
    mem_alloc_var = memory_allocated();

    prev_operator = reg_accumulator;
  }

  if(opcode == HALT)
  {
    halt_op();
  }
  else if(opcode == REFER)
  {
    if(refer(CADR(exp)))
      return;
    reg_next_expression = CADDR(exp);
  }
  else if(opcode == CONSTANT)
  {
    if(constant(CADR(exp)))
      return;
    reg_next_expression = CADDR(exp);
  }
  else if(opcode == CLOSE)
  {
    if(closure(exp))
      return;
    reg_next_expression = fifth(exp);
  }
  else if(opcode == MACRO)
  {
    if(macro(exp))
      return;
    reg_next_expression = CADDDDR(exp);
  }
  else if(opcode == TEST)
  {
    if(reg_accumulator != NIL)
      reg_next_expression = CADR(exp);
    else
      reg_next_expression = CADDR(exp);
  }
  // Not using this WHILE; reverting to the macro definition, as this
  // version doesn't handle (BREAK).
  else if(opcode == WHILE)
  {
    OBJECT_PTR cond = CADR(exp);
    OBJECT_PTR body  = CADDR(exp);

    OBJECT_PTR ret = NIL;

    while(1)
    {
      OBJECT_PTR temp = reg_current_stack;

      reg_next_expression = cond;

      while(car(reg_next_expression) != NIL)
      {
        eval(false);
        if(in_error)
          return;
      }

      if(reg_accumulator == NIL)
        break;

      reg_next_expression = body;

      while(car(reg_next_expression) != NIL)
      {
        eval(false);
        if(in_error)
          return;
      }

      //to handle premature exits
      //via RETURN-FROM
      if(reg_current_stack != temp)
        return;

      ret = reg_accumulator;
    }

    reg_accumulator = ret;
    reg_next_expression = CADDDR(exp);
  }
  else if(opcode == ASSIGN)
  {
    if(assign(CADR(exp)))
      return;
    reg_next_expression = CADDR(exp);
  }
  else if(opcode == DEFINE)
  {
    if(define(CADR(exp)))
      return;
    reg_next_expression = CADDR(exp);
  }
  else if(opcode == CONTI)
  {
    if(conti())
      return;
    reg_next_expression = CADR(exp);
  }
  else if(opcode == NUATE) //this never gets called
  {
    reg_current_stack = CADR(exp);
    reg_accumulator = CADDR(exp);
    reg_current_value_rib = NIL;
    reg_next_expression = cons(CONS_RETURN_NIL, cdr(reg_next_expression));
  }
  else if(opcode == FRAME)
  {
    if(frame(exp))
      return;
    reg_next_expression = CADDR(exp);
  }
  else if(opcode == ARGUMENT)
  {
    if(argument())
      return;
    reg_next_expression = CADR(exp);
  }
  else if(opcode == APPLY)
  {
    apply_compiled();
  }
  else if(opcode == RETURN)
  {
    return_op();
  }
}
// callback function is responsible for cleaning up all resources
// including op, op->_callback_node, and op->_callback_ptr
void MessageQueueService::_handle_async_callback(AsyncOpNode *op)
{
   if (op->_flags & ASYNC_OPFLAGS_SAFE_CALLBACK)
   {
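      // "Safe" callback: unwrap and free the request/response wrappers,
      // recycle the op node, and only then invoke the user callback.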

      Message *msg = op->get_request();
      if (msg && (msg->getMask() & message_mask::ha_async))
      {
         if (msg->getType() == async_messages::ASYNC_LEGACY_OP_START)
         {
            AsyncLegacyOperationStart *wrapper =
               static_cast<AsyncLegacyOperationStart *>(msg);
            msg = wrapper->get_action();
            delete wrapper;
         }
         else if (msg->getType() == async_messages::ASYNC_MODULE_OP_START)
         {
            AsyncModuleOperationStart *wrapper =
               static_cast<AsyncModuleOperationStart *>(msg);
            msg = wrapper->get_action();
            delete wrapper;
         }
         else if (msg->getType() == async_messages::ASYNC_OP_START)
         {
            AsyncOperationStart *wrapper =
               static_cast<AsyncOperationStart *>(msg);
            msg = wrapper->get_action();
            delete wrapper;
         }
         delete msg;
      }

      msg = op->get_response();
      if (msg && (msg->getMask() & message_mask::ha_async))
      {
         if (msg->getType() == async_messages::ASYNC_LEGACY_OP_RESULT)
         {
            AsyncLegacyOperationResult *wrapper =
               static_cast<AsyncLegacyOperationResult *>(msg);
            msg = wrapper->get_result();
            delete wrapper;
         }
         else if (msg->getType() == async_messages::ASYNC_MODULE_OP_RESULT)
         {
            AsyncModuleOperationResult *wrapper =
               static_cast<AsyncModuleOperationResult *>(msg);
            msg = wrapper->get_result();
            delete wrapper;
         }
      }
      void (*callback)(Message *, void *, void *) = op->__async_callback;
      void *handle = op->_callback_handle;
      void *parm = op->_callback_parameter;
      op->release();
      return_op(op);
      callback(msg, handle, parm);
   }
   else if (op->_flags & ASYNC_OPFLAGS_CALLBACK)
   {
      // note that _callback_node may be different from op
      // op->_callback_response_q is a "this" pointer we can use for
      // static callback methods
      op->_async_callback(op->_callback_node, op->_callback_response_q, op->_callback_ptr);
   }
}
void LIR_Assembler::emit_op1(LIR_Op1* op) {
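  // Same single-operand dispatch as above, from a VM variant whose move_op
  // takes additional temporary operands.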
  switch (op->code()) {
    case lir_move:   
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->tmp1_opr(), op->tmp2_opr(),
                op->tmp3_opr(), op->type(), op->patch_code(), op->info(),
                op->move_kind() == lir_move_unaligned);
      }
      break;

    case lir_prefetchr:
      prefetchr(op->in_opr());
      break;

    case lir_prefetchw:
      prefetchw(op->in_opr());
      break;

    case lir_return:
      return_op(op->in_opr()); 
      break;
    
    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_neg:
      negate(op->in_opr(), op->result_opr());
      break;
    
    case lir_bit_test:
      bit_test(op->in_opr(), op->result_opr());
      break;
    
    case lir_leal:
      leal(op->in_opr(), op->result_opr());
      break;
    
    case lir_null_check:
      if (GenerateCompilerNullChecks) {
        null_check(op->in_opr(), op->info());
      }
      break;

    case lir_klassTable_oop_load:
      klassTable_oop_load(op->in_opr(), op->result_opr(), op->tmp1_opr());
      break;

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}