Example #1
void fsocket_reset(fsocket_t *fs) {
	e_int32 ret;
	if (fs->state) {
		// At any given time there are no parallel requests on a single fake
		// socket, so at most one request is pending here.
		// After this runs, the pending wait returns a bad value; the fake
		// socket's owner detects the error, exits the request path, and
		// aborts the fake socket call.
		ret = semaphore_timeoutwait(&fs->recv_sem, 1);
		if (!ret) {
			fs->buf[0] = 0;
			semaphore_post(&fs->recv_sem); // wake the waiter so the pending receive drains
		}
	}
}
Example #2
/*!
 * @brief IO system shutdown.
 *
 * When the user wants to stop the codec system, this
 * function must be called to release the interrupt
 * signal, free the working buffer/code buffer/parameter
 * buffer, unmap the registers from user space, and
 * close the codec device.
 *
 * @param none
 *
 * @return
 * @li   0	System shut down successfully.
 * @li   -1	System shutdown failed.
 */
int IOSystemShutdown(void)
{

	/* Exit directly if already shutdown */
	if (vpu_fd == -1)
		return 0;

	/* Make sure real shutdown is done when no instance needs
	   to access vpu in the same process */
	if (vpu_active_num > 1) {
		vpu_active_num--;
		return 0;
	} else if (!vpu_active_num) {
		warn_msg("No instance is active\n");
		return 0;
	}

	if (!semaphore_wait(vpu_semap, API_MUTEX)) {
		err_msg("Unable to get mutex\n");
		return -1;
	}

	/*
	 * Do not call IOFreePhyMem/IOFreePhyPicParaMem/IOFreePhyUserDataMem
	 * to free memory, let kernel do.
	 */
#ifdef BUILD_FOR_ANDROID
	if (bit_work_addr.virt_uaddr != 0) {
		if (munmap((void *)bit_work_addr.virt_uaddr, bit_work_addr.size) != 0)
			err_msg("munmap failed\n");
	}
	bit_work_addr.virt_uaddr = 0;
#else
	IOFreeVirtMem(&bit_work_addr);
#endif

	if (munmap((void *)vpu_reg_base, BIT_REG_MARGIN) != 0)
		err_msg("munmap failed\n");

	vpu_active_num--;

	semaphore_post(vpu_semap, API_MUTEX);
	vpu_semaphore_close(vpu_shared_mem);

	if (vpu_fd >= 0) {
		close(vpu_fd);
		vpu_fd = -1;
	}

	return 0;
}
Example #3
DECLARE_TEST( semaphore, initialize )
{
	semaphore_t sem;

	semaphore_initialize( &sem, 0 );
	EXPECT_FALSE( semaphore_try_wait( &sem, 100 ) );
	semaphore_destroy( &sem );

	semaphore_initialize( &sem, 1 );
	EXPECT_TRUE( semaphore_try_wait( &sem, 100 ) );
	semaphore_post( &sem ); // restore the initial count
	semaphore_destroy( &sem );

	semaphore_initialize( &sem, 2 );
	EXPECT_TRUE( semaphore_wait( &sem ) );
	EXPECT_TRUE( semaphore_try_wait( &sem, 100 ) );
	EXPECT_FALSE( semaphore_try_wait( &sem, 100 ) );
	semaphore_post( &sem );
	semaphore_post( &sem ); // restore the initial count
	semaphore_destroy( &sem );
	
	return 0;
}
Example #4
bool fixed_queue_try_enqueue(fixed_queue_t *queue, void *data) {
  assert(queue != NULL);
  assert(data != NULL);

  if (!semaphore_try_wait(queue->enqueue_sem))
    return false;

  pthread_mutex_lock(&queue->lock);
  list_append(queue->list, data);
  pthread_mutex_unlock(&queue->lock);

  semaphore_post(queue->dequeue_sem);
  return true;
}
Example #5
void *fixed_queue_dequeue(fixed_queue_t *queue) {
  assert(queue != NULL);

  semaphore_wait(queue->dequeue_sem);

  pthread_mutex_lock(&queue->lock);
  void *ret = list_front(queue->list);
  list_remove(queue->list, ret);
  pthread_mutex_unlock(&queue->lock);

  semaphore_post(queue->enqueue_sem);

  return ret;
}
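fixed_queue_try_enqueue() and fixed_queue_dequeue() above build a bounded queue from two counting semaphores: enqueue_sem counts free slots and dequeue_sem counts filled ones, so a producer fails fast when the queue is full while a consumer blocks when it is empty. Below is a minimal usage sketch; the constructor fixed_queue_new(capacity) is an assumption for illustration and does not appear in the examples above.

#include <stdio.h>

/* Usage sketch only: fixed_queue_new() is assumed, not shown above. */
void bounded_queue_demo(void) {
  fixed_queue_t *queue = fixed_queue_new(4); /* assumed ctor, capacity 4 */
  static int items[8] = {0, 1, 2, 3, 4, 5, 6, 7};

  for (int i = 0; i < 8; ++i) {
    /* Returns false once all 4 slots are taken instead of blocking. */
    if (!fixed_queue_try_enqueue(queue, &items[i]))
      printf("queue full, dropped item %d\n", i);
  }

  /* Blocks until an item is available, then posts enqueue_sem so a
   * producer regains a slot. */
  int *head = (int *)fixed_queue_dequeue(queue);
  printf("dequeued %d\n", *head);
}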
Example #6
DECLARE_TEST( semaphore, threaded )
{
	object_t thread[32];
	int ith;
	int failed_waits;

	semaphore_test_t test;
	semaphore_initialize( &test.read, 0 );
	semaphore_initialize( &test.write, 0 );
	test.loopcount = 128;
	test.counter = 0;
	
	for( ith = 0; ith < 32; ++ith )
	{
		thread[ith] = thread_create( semaphore_waiter, "semaphore_waiter", THREAD_PRIORITY_NORMAL, 0 );
		thread_start( thread[ith], &test );
	}

	test_wait_for_threads_startup( thread, 32 );

	failed_waits = 0;
	for( ith = 0; ith < test.loopcount * 32; ++ith )
	{
		semaphore_post( &test.read );
		thread_yield();
		if( !semaphore_try_wait( &test.write, 200 ) )
		{
			failed_waits++;
			EXPECT_TRUE( semaphore_wait( &test.write ) );
		}
	}

	for( ith = 0; ith < 32; ++ith )
	{
		thread_terminate( thread[ith] );
		thread_destroy( thread[ith] );
		thread_yield();
	}

	test_wait_for_threads_exit( thread, 32 );

	EXPECT_EQ( test.counter, test.loopcount * 32 );
	EXPECT_EQ( failed_waits, 0 );

	semaphore_destroy( &test.read );
	semaphore_destroy( &test.write );
	
	return 0;
}
Example #7
static void* semaphore_waiter( object_t thread, void* arg )
{
	semaphore_test_t* sem = arg;
	int loop;

	for( loop = 0; loop < sem->loopcount; ++loop )
	{
		thread_yield();
		semaphore_wait( &sem->read );
		++sem->counter;
		semaphore_post( &sem->write );
	}
	
	return 0;
}
Example #8
void alarm_cleanup(void) {
  // If lazy_initialize never ran, there is nothing to do
  if (!alarms)
    return;

  // Wake the callback thread: clear its run flag, post the semaphore it
  // waits on so it exits, then join and free it.
  callback_thread_active = false;
  semaphore_post(alarm_expired);
  thread_free(callback_thread);
  callback_thread = NULL;

  semaphore_free(alarm_expired);
  alarm_expired = NULL;
  timer_delete(&timer);
  list_free(alarms);
  alarms = NULL;

  pthread_mutex_destroy(&monitor);
}
Example #9
static unsigned long thread_func(void *param)
{
    ThreadParams *thread_params = (ThreadParams*)param;
    BucketGrid   *bucket_grid   = (BucketGrid*)thread_params->bucket_grid;
    RenderParams *render_params = (RenderParams*)thread_params->render_params;

    // Atomically claim the next bucket id; each thread renders a distinct bucket.
    u32 bucket_id = __sync_fetch_and_add(bucket_grid->current_bucket, 1);
    while(bucket_id < bucket_grid->num_buckets){
        u32 bucket_index = hilbert_curve_transform_bucket_id(
                bucket_grid->num_buckets_x,bucket_id);
        bucket_grid->active_buckets[bucket_index] = 1;
        path_trace(*render_params,*bucket_grid,bucket_index);
        bucket_grid->active_buckets[bucket_index] = 0;
        bucket_grid->done_buckets[bucket_index] = 1;
        // Signal the progress tracker that one more bucket is finished.
        semaphore_post(bucket_grid->bucket_done);
        bucket_id = __sync_fetch_and_add(bucket_grid->current_bucket, 1);
    }
    return 0;
}
Example #10
/* Wake up all the potential handlers for this RPC target. Return number of
 * handlers posted to. */
static int wake_up_handlers_for_target(const TFN_Ptr function, int box_id)
{
    int num_posted = 0;

    UvisorBoxIndex * index = (UvisorBoxIndex *) g_context_current_states[box_id].bss;
    uvisor_pool_queue_t * fn_group_queue = &index->rpc_fn_group_queue->queue;
    uvisor_rpc_fn_group_t * fn_group_array = index->rpc_fn_group_queue->fn_groups;

    /* Wake up all known waiters for this function. Search for the function in
     * all known function groups. We have to search through all function groups
     * (not just those currently waiting for messages) because we want the RTOS
     * to be able to pick the highest priority waiter to schedule to run. Some
     * waiters will wake up and find they have nothing to do if a higher
     * priority waiter already took care of handling the incoming RPC. */
    uvisor_pool_slot_t slot;
    slot = fn_group_queue->head;
    while (slot < fn_group_queue->pool->num) {
        /* Look for the function in this function group. */
        uvisor_rpc_fn_group_t * fn_group = &fn_group_array[slot];

        TFN_Ptr const * fn_ptr_array = fn_group->fn_ptr_array;
        size_t i;

        for (i = 0; i < fn_group->fn_count; i++) {
            /* If function is found: */
            if (fn_ptr_array[i] == function) {
                /* Wake up the waiter. */
                semaphore_post(&fn_group->semaphore);
                ++num_posted;
            }
        }

        slot = fn_group_queue->pool->management_array[slot].queued.next;
    }

    return num_posted;
}
Example #11
static void unlock(vbv_t* vbv)
{
    printf("vbv unlock\n");
    semaphore_post(vbv->mutex);
}
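unlock() above releases a binary semaphore that serves as a mutex (the same pattern appears again with fbm_t in Example #16 below). The matching lock() is not shown anywhere on this page; here is a minimal sketch under the assumption that it simply mirrors unlock() with a wait.

/* Sketch only: lock() is assumed to mirror unlock() via semaphore_wait(). */
static void lock(vbv_t* vbv)
{
    semaphore_wait(vbv->mutex);
}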
Example #12
 void post()
 {  semaphore_post(&m_sem); }
Example #13
 void post()
 {  semaphore_post(mp_sem); }
Example #14
static void drain_result_queue(void)
{
    UvisorBoxIndex * callee_index = (UvisorBoxIndex *) *__uvisor_config.uvisor_box_context;
    uvisor_pool_queue_t * callee_queue = &callee_index->rpc_incoming_message_queue->done_queue;
    uvisor_rpc_message_t * callee_array = (uvisor_rpc_message_t *) callee_queue->pool->array;

    int callee_box = g_active_box;

    /* Verify that the callee queue is entirely in callee box BSS. We check
     * the entire queue instead of just the message we are interested in,
     * because we want to validate the queue before we attempt any operations
     * on it, like dequeuing. */
    if (!is_valid_queue(callee_queue, callee_box))
    {
        /* The callee's done queue is not valid. This shouldn't happen in a
         * non-malicious system. */
        assert(false);
        return;
    }

    /* For each message in the queue: */
    do {
        uvisor_pool_slot_t callee_slot;

        /* Dequeue the first result message from the queue. */
        callee_slot = uvisor_pool_queue_try_dequeue_first(callee_queue);
        if (callee_slot >= callee_queue->pool->num) {
            /* The queue is empty or busy. */
            break;
        }

        uvisor_rpc_message_t * callee_msg = &callee_array[callee_slot];

        /* Look up the origin message. This should have been remembered
         * by uVisor when it did the initial delivery. */
        uvisor_pool_slot_t caller_slot = uvisor_result_slot(callee_msg->match_cookie);

        /* Based on the origin message, look up the box to return the result to
         * (caller box). */
        const int caller_box = callee_msg->other_box_id;

        UvisorBoxIndex * caller_index = (UvisorBoxIndex *) g_context_current_states[caller_box].bss;
        uvisor_pool_queue_t * caller_queue = &caller_index->rpc_outgoing_message_queue->queue;
        uvisor_rpc_message_t * caller_array = (uvisor_rpc_message_t *) caller_queue->pool->array;

        /* Verify that the caller queue is entirely in caller box BSS. We check the
         * entire queue instead of just the message we are interested in, because
         * we want to validate the queue before we attempt any operations on it. */
        if (!is_valid_queue(caller_queue, caller_box))
        {
            /* The caller's outgoing queue is not valid. The caller queue is
             * messed up. This shouldn't happen in a non-malicious system.
             * Discard the result message (not retrying later), because the
             * caller is malicious. */
            assert(false);
            continue;
        }

        uvisor_rpc_message_t * caller_msg = &caller_array[caller_slot];

        /* Verify that the caller box is waiting for the callee box to complete
         * the RPC in this slot. */

        /* Other box ID must be same. */
        if (caller_msg->other_box_id != callee_box) {
            /* The caller isn't waiting for this box to complete it. This
             * shouldn't happen in a non-malicious system. */
            assert(false);
            continue;
        }

        /* The caller must be waiting for a box to complete this slot. */
        if (caller_msg->state != UVISOR_RPC_MESSAGE_STATE_SENT)
        {
            /* The caller isn't waiting for any box to complete it. This
             * shouldn't happen in a non-malicious system. */
            assert(false);
            continue;
        }

        /* The match_cookie must be same. */
        if (caller_msg->match_cookie != callee_msg->match_cookie) {
            /* The match cookies didn't match. This shouldn't happen in a
             * non-malicious system. */
            assert(false);
            continue;
        }

        /* Copy the result to the message in the caller box outgoing message
         * queue. */
        caller_msg->result = callee_msg->result;
        callee_msg->state = UVISOR_RPC_MESSAGE_STATE_IDLE;
        caller_msg->state = UVISOR_RPC_MESSAGE_STATE_DONE;

        /* Now that we've copied the result, we can free the message from the
         * callee queue. The callee (the one sending result messages) doesn't
         * care about the message after they post it to their outgoing result
         * queue. */
        callee_slot = uvisor_pool_queue_try_free(callee_queue, callee_slot);
        if (callee_slot >= callee_queue->pool->num) {
            /* The queue is empty or busy. This should never happen: we were
             * able to dequeue a result message but then couldn't free it.
             * Taking down the entire system would be bad, and so would never
             * freeing slots in the outgoing result queue; but if we could
             * dequeue the slot, we should have no trouble freeing it here. */
            assert(false);
            break;
        }

        /* Post to the result semaphore, ignoring errors. */
        int status;
        status = semaphore_post(&caller_msg->semaphore);
        if (status) {
            /* We couldn't post to the result semaphore. We shouldn't really
             * bring down the entire system if one box messes up its own
             * semaphore. In a non-malicious system, this should never happen.
             * */
            assert(false);
        }
    } while (1);
}
Example #15
/*!
 * @brief IO system initialization.
 *  When the user wants to start up the codec system,
 *  this function must be called to open the codec device,
 *  map the registers into user space,
 *  get the working buffer/code buffer/parameter buffer,
 *  download the firmware, and then set up the interrupt signal path.
 *
 * @param callback vpu interrupt callback.
 *
 * @return
 * @li  0	System initialized successfully.
 * @li -1	System initialization failed.
 */
int IOSystemInit(void *callback)
{
	int ret;

	/* Exit directly if already initialized */
	if (vpu_fd > 0) {
		vpu_active_num++;
		return 0;
	}

	ret = get_system_rev();
	if (ret == -1) {
		err_msg("Error: Unable to obtain system rev information\n");
		return -1;
	}

	vpu_fd = open("/dev/mxc_vpu", O_RDWR);
	if (vpu_fd < 0) {
		err_msg("Can't open /dev/mxc_vpu\n");
		return -1;
	}

	vpu_semap = vpu_semaphore_open();
	if (vpu_semap == NULL) {
		err_msg("Error: Unable to open vpu shared memory file\n");
		close(vpu_fd);
		vpu_fd = -1;
		return -1;
	}

	if (!semaphore_wait(vpu_semap, API_MUTEX)) {
		err_msg("Error: Unable to get mutex\n");
		close (vpu_fd);
		vpu_fd = -1;
		return -1;
	}

	vpu_reg_base = (unsigned long)mmap(NULL, BIT_REG_MARGIN,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, vpu_fd, 0);

	if ((void *)vpu_reg_base == MAP_FAILED) {
		err_msg("Can't map register\n");
		close(vpu_fd);
		vpu_fd = -1;
		semaphore_post(vpu_semap, API_MUTEX);
		return -1;
	}

	vpu_active_num++;

	IOClkGateSet(true);
	bit_work_addr.size = TEMP_BUF_SIZE + PARA_BUF_SIZE +
			     CODE_BUF_SIZE + PARA_BUF2_SIZE;

	if (_IOGetPhyMem(VPU_IOC_GET_WORK_ADDR, &bit_work_addr) < 0) {
		err_msg("Get bitwork address failed!\n");
		goto err;
	}

	if (IOGetVirtMem(&bit_work_addr) <= 0)
		goto err;

	UnlockVpu(vpu_semap);
	return 0;

err:
	err_msg("Error in IOSystemInit()");
	UnlockVpu(vpu_semap);
	IOSystemShutdown();
	return -1;
}
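IOSystemInit() pairs with the IOSystemShutdown() shown earlier, and both are reference-counted through vpu_active_num: only the last active instance in a process performs the real teardown. A minimal sketch of the intended pairing follows; passing NULL for callback is an assumption based on the bodies shown here, which never reference that parameter.

/* Usage sketch based only on the two functions shown on this page. */
int vpu_session(void)
{
	/* NULL callback: an assumption; the bodies above never use it. */
	if (IOSystemInit(NULL) < 0)
		return -1;

	/* ... open a codec instance, feed bitstream, read frames ... */

	/* Balances the init; the real teardown happens only when the last
	 * active instance in this process shuts down. */
	return IOSystemShutdown();
}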
Example #16
static void unlock(fbm_t* fbm)
{
	semaphore_post(fbm->mutex);
}
Example #17
/*!
 * @brief IO system initialization.
 *  When the user wants to start up the codec system,
 *  this function must be called to open the codec device,
 *  map the registers into user space,
 *  get the working buffer/code buffer/parameter buffer,
 *  download the firmware, and then set up the interrupt signal path.
 *
 * @param callback vpu interrupt callback.
 *
 * @return
 * @li  0	System initialized successfully.
 * @li -1	System initialization failed.
 */
int IOSystemInit(void *callback)
{
	int ret;

	/* Exit directly if already initialized */
	if (vpu_fd > 0) {
		vpu_active_num++;
		return 0;
	}

	ret = get_system_rev();
	if (ret == -1) {
		err_msg("Error: Unable to obtain system rev information\n");
		return -1;
	}

	vpu_fd = open("/dev/mxc_vpu", O_RDWR);
	if (vpu_fd < 0) {
		err_msg("Can't open /dev/mxc_vpu: %s\n", strerror(errno));
		return -1;
	}

	vpu_shared_mem = vpu_semaphore_open();
	if (vpu_shared_mem == NULL) {
		err_msg("Error: Unable to open vpu shared memory file\n");
		close(vpu_fd);
		vpu_fd = -1;
		return -1;
	}

	if (!semaphore_wait(vpu_semap, API_MUTEX)) {
		err_msg("Error: Unable to get mutex\n");
		close (vpu_fd);
		vpu_fd = -1;
		return -1;
	}

	vpu_reg_base = (unsigned long)mmap(NULL, BIT_REG_MARGIN,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, vpu_fd, 0);

	if ((void *)vpu_reg_base == MAP_FAILED) {
		err_msg("Can't map register\n");
		close(vpu_fd);
		vpu_fd = -1;
		semaphore_post(vpu_semap, API_MUTEX);
		return -1;
	}

	vpu_active_num++;

	IOClkGateSet(true);
#ifdef BUILD_FOR_ANDROID
	unsigned long va_addr;

	/* Special handling for the bit work buffer, which is reserved in the vpu driver probe */
	bit_work_addr.size = TEMP_BUF_SIZE + PARA_BUF_SIZE +
			     CODE_BUF_SIZE + PARA_BUF2_SIZE;
	if (_IOGetPhyMem(VPU_IOC_GET_WORK_ADDR, &bit_work_addr) < 0) {
		err_msg("Get bitwork address failed!\n");
		goto err;
	}

	va_addr = (unsigned long)mmap(NULL, bit_work_addr.size, PROT_READ | PROT_WRITE,
					MAP_SHARED, vpu_fd, bit_work_addr.phy_addr);
	if ((void *)va_addr == MAP_FAILED) {
		bit_work_addr.virt_uaddr = 0;
		goto err;
	}

	bit_work_addr.virt_uaddr = va_addr;
#else
	bit_work_addr.size = TEMP_BUF_SIZE + PARA_BUF_SIZE +
			     CODE_BUF_SIZE + PARA_BUF2_SIZE;
	if (_IOGetPhyMem(VPU_IOC_GET_WORK_ADDR, &bit_work_addr) < 0) {
		err_msg("Get bitwork address failed!\n");
		goto err;
	}

	if (IOGetVirtMem(&bit_work_addr) == -1)
		goto err;
#endif
	UnlockVpu(vpu_semap);
	return 0;

err:
	err_msg("Error in IOSystemInit()");
	UnlockVpu(vpu_semap);
	IOSystemShutdown();
	return -1;
}
Example #18
/*
 * Ninja threads run this function.
 * For pseudocode and design details, please see README.pdf.
 */
void *ninja(void *threadid)
{
    int ret;
    int tid = (intptr_t)threadid;
    double ttime;
    double tstop;
    while(1)
    {   
        tstop = (double)clock()/CLOCKS_PER_SEC;
        ttime= tstop-tstart;
        ttime = ttime * 10;
        // printf("Time = %f",ttime);
        if(ttime > time_to_run)
        {
           pthread_exit(NULL);
        }
        printf("Ninja       %d | Waiting \n",tid);

        /*  Wait the mutex_register  */
        if( 0 != (ret = semaphore_wait(&mutex_register)) ) {
            fprintf(stderr, "Error: semaphore_wait() failed with %d in thread %d\n", ret, tid);
            pthread_exit(NULL);
        }
        /*  Wait the mutex_ninja_count  */
        if( 0 != (ret = semaphore_wait(&mutex_ninja_count)) ) {
            fprintf(stderr, "Error: semaphore_wait() failed with %d in thread %d\n", ret, tid);
            pthread_exit(NULL);
        }
        /* Add one ninja */
        ninja_count++;
        if(ninja_count ==1)
        {
            /* Wait department */
            if( 0 != (ret = semaphore_wait(&mutex_department)) ) {
                fprintf(stderr, "Error: semaphore_wait() failed with %d in thread %d\n", ret, tid);
                pthread_exit(NULL);
            }
        }
    
        /* Post mutex_ninja_count */
        if( 0 != (ret = semaphore_post(&mutex_ninja_count)) ) {
            fprintf(stderr, "Error: semaphore_post() failed with %d in thread %d\n", ret, tid);
            pthread_exit(NULL);
        }
        
        /* Post mutex_register */
        if( 0 != (ret = semaphore_post(&mutex_register)) ) {
            fprintf(stderr, "Error: semaphore_post() failed with %d in thread %d\n", ret, tid);
            pthread_exit(NULL);
        }

        /* Wait team */
        if( 0 != (ret = semaphore_wait(&mutex_team)) ) {
            fprintf(stderr, "Error: semaphore_wait() failed with %d in thread %d\n", ret, tid);
            pthread_exit(NULL);
        }
        ninja_enter[tid]++;
        printf("Ninja       %d | Costume preparation\n",tid);
        int random_num = random() % 5000;
        usleep(random_num);

        /* Post mutex_team */
        if( 0 != (ret = semaphore_post(&mutex_team)) ) {
            fprintf(stderr, "Error: semaphore_post() failed with %d in thread %d\n", ret, tid);
            pthread_exit(NULL);
        }
        /* Wait mutex_ninja_count */
        if( 0 != (ret = semaphore_wait(&mutex_ninja_count)) ) {
            fprintf(stderr, "Error: semaphore_wait() failed with %d in thread %d\n", ret, tid);
            pthread_exit(NULL);
        }
        ninja_left[tid]++;
        ninja_count--;
        printf("Ninja       %d | Leaving \n",tid);
        if(ninja_count == 0)
        {
            /* Post mutex_department */
            if( 0 != (ret = semaphore_post(&mutex_department)) ) {
                fprintf(stderr, "Error: semaphore_post() failed with %d in thread %d\n", ret, tid);
                pthread_exit(NULL);
            } 
        }
        /* Post mutex_ninja_count */
        if( 0 != (ret = semaphore_post(&mutex_ninja_count)) ) {
            fprintf(stderr, "Error: semaphore_post() failed with %d in thread %d\n", ret, tid);
            pthread_exit(NULL);
        }
        random_num = random() % 1000;
        // printf("I sleep %d",random_num);
        usleep(random_num);
    }
}
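The ninja_count bookkeeping above is the classic "lightswitch" pattern: the first ninja in locks mutex_department and the last one out releases it, so the group as a whole holds the department while any member is inside. Stripped of logging and error handling, the core looks like the sketch below, reusing the globals from the example (return values are ignored here only for brevity).

/* Lightswitch sketch distilled from ninja() above; error handling omitted. */
void lightswitch_enter(void)
{
    semaphore_wait(&mutex_ninja_count);
    if (++ninja_count == 1)                /* first one in ...         */
        semaphore_wait(&mutex_department); /* ... locks the department */
    semaphore_post(&mutex_ninja_count);
}

void lightswitch_leave(void)
{
    semaphore_wait(&mutex_ninja_count);
    if (--ninja_count == 0)                /* last one out ...            */
        semaphore_post(&mutex_department); /* ... releases the department */
    semaphore_post(&mutex_ninja_count);
}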
Example #19
// Callback function for wake alarms and our posix timer
static void timer_callback(UNUSED_ATTR void *ptr) {
  semaphore_post(alarm_expired);
}
Example #20
TEST(Semaphore, Link)
{
    Semaphore s = SEMAPHORE_INIT;

    semaphore_post(& s);
}
Example #21
/**
 *\brief Send a request over the network connection.
 *\param fs The fake socket and its associated state.
 *\param msg The request message to send.
 *\param mlen Length of the request message.
 *\param recv_buf Buffer that receives the reply.
 *\param recv_len Length of the receive buffer.
 *\param timeout_usec Receive timeout in microseconds.
 *\retval E_OK on success.
 */
e_int32 fsocket_request(fsocket_t *fs, e_uint8 *msg, e_uint32 mlen,
		e_uint8 *recv_buf, e_uint32 recv_len, e_uint32 timeout_usec) {
	e_int32 ret;
	e_uint8 req_id;
	e_uint8 s_id;
	int req_iid = -1, s_iid = -1;

	/* Tick counters for handling timeouts */
	e_uint32 beg_time, elapsed_time;

	e_assert(fs && fs->state, E_ERROR_INVALID_HANDLER);

//	DMSG((STDOUT, "FAKE SOCKET [%s:%u] try request, current rq_id=%u...\r\n", fs->name, (unsigned int) fs->id, (unsigned int) fs->rq_id));

	// acquire the send lock
	ret = semaphore_timeoutwait(&fs->send_sem, timeout_usec);
	e_assert(ret, E_ERROR_LOCK_FAILED);

	/* Record the start tick for timeout accounting */
	beg_time = GetTickCount();

	// send the request
	ret = send_one_msg(fs, msg, mlen, timeout_usec);
	if (e_failed(ret))
		goto END;

	// wait for the reply
	elapsed_time = GetTickCount() - beg_time;

	while (timeout_usec <= 0
			|| (elapsed_time = GetTickCount() - beg_time) < timeout_usec) {
		if (timeout_usec <= 0) { // no timeout configured: wait forever
			ret = wait_for_reply_forever(fs);
		} else {
			ret = wait_for_reply(fs, timeout_usec - elapsed_time);
		}
		if (!e_failed(ret)) {
			// extract the reply payload and the request id
			recv_len = recv_len >= MSG_MAX_LEN ? MSG_MAX_LEN : recv_len;
			sscanf(fs->buf, "#%02X%02X%[^@]", &s_iid, &req_iid, recv_buf);
			s_id = s_iid & 0xFF;
			req_id = req_iid & 0xFF;
			// TODO: ignore stale messages?
			if (req_id < fs->rq_id) {
				DMSG((STDOUT, "FAKE SOCKET [%s:%u:%u] got stale message: id=%u\nignoring it, waiting for the next one\n", fs->name, (unsigned int) fs->id, (unsigned int) fs->rq_id, (unsigned int) req_id));
				continue;
			} else if (req_id > fs->rq_id) {
//				DMSG((STDOUT, "unexpected request id, please check!\n"));
//				while (1)
//					;
			}
			break;
		}
		break;
	}

	END:
	// done; release the send lock so the next request can go out
	ret = semaphore_post(&fs->send_sem);
	e_assert(ret, E_ERROR_TIME_OUT);
//	DMSG(
//	(STDOUT, "[%s_%u_%u]FAKE SOCKET release send sem...\r\n", fs->name, (unsigned int)fs->id,(unsigned int)fs->rq_id));

	elapsed_time = GetTickCount() - beg_time;
	if (MSG_LEVEL_VERBOSE)
		DMSG((STDOUT, "FAKE SOCKET [%s:%u:%u] request done in %u ms...\r\n", fs->name, (unsigned int) fs->id, req_id, (unsigned int) (elapsed_time / 1000)));
	return E_OK;
}
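The sscanf format in fsocket_request() implies replies are framed as '#', two two-digit hex fields (socket id, then request id), and a payload terminated by '@'. Below is a small self-contained check of that parse; the sample frame "#01A5HELLO@" is made up for illustration.

#include <stdio.h>

/* The "#SSRRpayload@" layout is inferred from the sscanf format string in
 * fsocket_request() above; the frame below is hypothetical. */
void parse_demo(void)
{
	unsigned int s_iid = 0, req_iid = 0;
	char payload[32] = {0};

	sscanf("#01A5HELLO@", "#%02X%02X%[^@]", &s_iid, &req_iid, payload);
	printf("socket=%02X req=%02X payload=%s\n", s_iid, req_iid, payload);
	/* prints: socket=01 req=A5 payload=HELLO */
}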