Example #1
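/*
 * Send a string of PostScript to the NeWS interpreter: install a local
 * SIGPIPE handler, queue the buffer for output, pump I/O events until the
 * interpreter acknowledges, then restore the original handler.
 */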
static	void
NeWS_send(const char *cp, size_t len)
{
    struct sigaction orig;

    if (PostScript == (PSFILE *) NULL || (globals.ev.flags & NeWS_ev_mask))
	return;

    (void) sigaction(SIGPIPE, &psio_sigpipe_handler_struct, &orig);
    sigpipe_error = False;

    NeWS_send_byte = cp;
    NeWS_send_end = cp + len;
    NeWS_xout.xio_events |= XIO_OUT;
#if HAVE_POLL
    if (NeWS_xout.pfd != NULL)
	NeWS_xout.pfd->events |= POLLOUT;
#endif

    write_to_NeWS();
    (void) read_events(NeWS_ev_mask | EV_ACK);

    if (!(globals.ev.flags & EV_ACK)) {	/* if interrupted */
	/* ||| Do something more severe here */
    }

    globals.ev.flags &= ~EV_ACK;

    /* put back generic handler for SIGPIPE */
    (void) sigaction(SIGPIPE, &orig, (struct sigaction *) NULL);

    if (!NeWS_in_header)
	post_send();
}
Example #2
    /// Advance the session from the receive thread
    void tcp_session::run()
    {
        if (m_notify_connected)
        {
            connect_invoke();
            m_notify_connected = false;
        }

        post_recv(false);
        post_send(false);

        std::size_t bytes;
        while ((bytes = m_recvbuf.gcount()) > 0)
        {
            on_recv(m_recvbuf.gptr(), bytes);
            m_recvbuf.gbump(bytes);
        }

        try_shutdown();

        if (m_both_closed.is_locked() && m_error)
        {
            on_disconnect(m_error);
            m_error.clear();
            m_valid = false;
        }
    }
Example #3
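// Invoked once the connection to the server has been established:
// arm the first receive, then read a line from stdin and send it.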
void client_session::connected()
{
	printf("连接服务器成功...\n");
	post_recv();
	char data[255];
	gets_s(data);
	post_send(data, strlen(data)+1);
	
}
Example #4
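// Invoked when a message from the server has been received:
// print it, re-arm the receive, then send the next line typed by the user.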
void client_session::on_recv()
{
	printf("Server say: %s\n", get_recv_data());
	post_recv();

	char data[255];
	gets_s(data);
	post_send(data, strlen(data)+1);
}
Example #5
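// Build a minimal RTSP response (status line, CSeq header and date string)
// and hand it to post_send().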
int RtspClientConnection::setRtspResponse(const char * responseStr){
	std::string response;
	append(response, 
		"RTSP/1.0 %s\r\n"
		"CSeq: %s\r\n"
		"%s\r\n",
		responseStr,
		m_seqNo.c_str(),
		dateStr().c_str());
	return post_send(response.c_str(), response.length());
}
Example #6
    /// Send completion callback
    void tcp_session::send_handler( boost::system::error_code const& ec, std::size_t bytes )
    {
        if (ec)
        {
            set_disconnect_error(ec);
            passive_shutdown(socket_base::shutdown_send);
            return ;
        }

        m_sendbuf.gbump(bytes);
        post_send(true);
    }
Example #7
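// Handle an RTSP DESCRIBE request: locate (or create) the media session named
// by the URL suffix, generate its SDP description and send it back in a
// 200 OK response.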
int RtspClientConnection::handle_describle(
										   std::string & urlPreSuffix,
										   std::string & urlSuffix,
										   std::string & fullRequestStr)
{

	std::string urlTotalSuffix = urlPreSuffix ;
	if (!urlTotalSuffix.empty()){
		urlTotalSuffix.append("/");
	}
	urlTotalSuffix.append(urlSuffix);

	// TODO: add the authenticationOK check here

	MediaSession * session = m_rtsp->createAttachMediaSession(urlTotalSuffix);
	if (!session){
		handleCmd_notFound();
		return -1;
	}

	const ipaddr & localAddr = m_client->get_localaddr();
	std::string sdp = session->GenerateSDPDescription(localAddr);

	//get the rtsp url
	//rtsp://127.0.0.1/
	std::string rtspUrl;

	append(rtspUrl, "rtsp://%s:%u/%s", 
		localAddr.ip, watcher::instance()->tcp_port() ,
		session->StreamName().c_str());

	std::string response = "RTSP/1.0 200 OK\r\n";

	append(response, "CSeq: %s\r\n"
					 "%s"
					 "Content-Base: %s\r\n"
					 "Content-Type: application/sdp\r\n"
					 "Content-Length: %d\r\n\r\n"
					 "%s",
					 getSeq().c_str(),
					 dateStr().c_str(),
					 rtspUrl.c_str(),
					 (int)sdp.length(),
					 sdp.c_str());

	debug_log("S-C : %s\n", response.c_str());
	
	return post_send(response.c_str(), response.length());
}
Example #8
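// Handle an RTSP OPTIONS request: reply with the list of supported methods.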
int RtspClientConnection::handle_options(){

	std::string str;
	append(str, 
		"RTSP/1.0 200 OK\r\n"
		"CSeq: %s\r\n"
		"%s"
		"Public: %s\r\n\r\n",
		getSeq().c_str(), 
		dateStr().c_str(), 
		"OPTIONS, DESCRIBE, SETUP, TEARDOWN, PLAY, PAUSE, GET_PARAMETER, SET_PARAMETER");

	return post_send(str.c_str(), str.length());

}
Example #9
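/*
 * Terminate the PostScript header section: send the closing "stop" token to
 * the NeWS interpreter and wait for its acknowledgement.
 */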
static void
endheaderNeWS(void)
{
    static const char str[] = "stop\n%%xdvimark\n";

    if (globals.debug & DBG_PS)
	puts("Running endheaderNeWS()");

    if (NeWS_active) {
	NeWS_send(str, (sizeof str) - 1);
	NeWS_active = False;
	post_send();
	waitack();
	NeWS_in_header = False;
	NeWS_ev_mask = NEWS_MASK_NORMAL;
    }
}
Example #10
/* this function is called with endpoint->endpoint_lock held */
int mca_btl_openib_endpoint_post_send(mca_btl_openib_endpoint_t *endpoint,
        mca_btl_openib_send_frag_t *frag)
{
    int prio = to_base_frag(frag)->base.des_flags & MCA_BTL_DES_FLAGS_PRIORITY;
    mca_btl_openib_header_t *hdr = frag->hdr;
    mca_btl_base_descriptor_t *des = &to_base_frag(frag)->base;
    int qp, ib_rc, rc;
    bool do_rdma = false;
    size_t size;

    if(OPAL_LIKELY(des->order == MCA_BTL_NO_ORDER))
        des->order = frag->qp_idx;

    qp = des->order;

    if(acquire_wqe(endpoint, frag) != OPAL_SUCCESS)
        return OPAL_ERR_RESOURCE_BUSY;

    size = des->des_segments->seg_len + frag->coalesced_length;

    rc = mca_btl_openib_endpoint_credit_acquire (endpoint, qp, prio, size,
                                                 &do_rdma, frag, true);
    if (OPAL_UNLIKELY(OPAL_SUCCESS != rc)) {
        qp_put_wqe(endpoint, qp);
        return OPAL_ERR_RESOURCE_BUSY;
    }

    qp_reset_signal_count(endpoint, qp);
    ib_rc = post_send(endpoint, frag, do_rdma, 1);

    if(!ib_rc)
        return OPAL_SUCCESS;

    if(endpoint->nbo)
        BTL_OPENIB_HEADER_NTOH(*hdr);

    mca_btl_openib_endpoint_credit_release (endpoint, qp, do_rdma, frag);

    qp_put_wqe(endpoint, qp);

    BTL_ERROR(("error posting send request error %d: %s. size = %lu\n",
               ib_rc, strerror(ib_rc), size));
    return OPAL_ERROR;
}
Example #11
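// Build an RTSP response that carries a message body: status line, CSeq and
// date headers, Content-Length, then the body itself.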
int RtspClientConnection::setRtspResponse(const char * responseStr, const char * contentStr){
	if (!contentStr){
		contentStr = "";
	}
	std::string response;

	append(response,
		"RTSP/1.0 %s\r\n"
		"CSeq: %s\r\n"
		"%s"
		"Content-Length: %d\r\n\r\n"
		"%s",
		responseStr,
		m_seqNo.c_str(),
		dateStr().c_str(),
		(int)strlen(contentStr),
		contentStr);
	return post_send(response.c_str(), response.length());
}
Example #12
    /// Send data
    bool tcp_session::send( char const* buf, std::size_t bytes )
    {
        boost::mutex::scoped_lock lock(m_send_mutex);

        if (m_shutdown_lock.is_locked())
            return false;

        if (m_sendbuf.spare() < bytes)
        {
            if (m_opts->sendbufoverflow_disconnect)
            {
                set_disconnect_error(boost::system::error_code(cec_sendbuf_overflow
                    , cobwebs_error_category::getInstance()));
                shutdown();
            }
            return false;
        }
        
        m_sendbuf.sputn(buf, bytes);
        post_send(false);       ///< Push once immediately to improve send speed and responsiveness.
        return true;
    }
Example #13
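/*
 * Fill in the addressing fields of the RDMA buffer, post the send work
 * request, then retrieve one work completion and check its status.
 */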
static int rdma_send(struct thread_context_t *t_ctx, struct rdma_req_t *rdma_req) 
{
	int rc = 0;
	struct rdma_resource_t *rdma_resource;
	struct user_param_t *user_param;
	struct rdma_buf_t *rdma_buf;
	struct ibv_wc wc;

	rdma_resource = t_ctx->rdma_resource;
	user_param    = &(rdma_resource->user_param);
	rdma_buf      = rdma_req->rdma_buf;

	rdma_buf->slid = rdma_resource->port_attr.lid;
	rdma_buf->dlid = t_ctx->remote_lid;
	rdma_buf->sqpn = t_ctx->qp->qp_num;
	rdma_buf->dqpn = t_ctx->remote_qpn;

	rc = post_send(t_ctx, rdma_req);
	if (rc) {
		ERROR("Failed to post_send.\n");
		return rc;
	}

	rc = get_thread_wc(t_ctx, &wc, 1);
	if (rc) {
		ERROR("Failed to get wc.\n");
		return rc;
	}

	if (wc.status != IBV_WC_SUCCESS) {
		ERROR("Got bad completion with status: 0x%x, vendor syndrome: 0x%x\n",
			wc.status, wc.vendor_err);
		return 1;
	}

	return 0;
}
Example #14
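/*
 * Send an explicit credit-update control message to the peer on the given QP:
 * lazily allocate the per-QP credit fragment, load the locally accumulated
 * receive credits into its header, and post it (over eager RDMA when a credit
 * for that is available); if the post fails, return the credits to the local
 * counters.
 */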
void mca_btl_openib_endpoint_send_credits(mca_btl_openib_endpoint_t* endpoint,
        const int qp)
{
    mca_btl_openib_module_t* openib_btl = endpoint->endpoint_btl;
    mca_btl_openib_send_control_frag_t* frag;
    mca_btl_openib_rdma_credits_header_t *credits_hdr;
    int rc;
    bool do_rdma = false;
    int32_t cm_return;

    frag = endpoint->qps[qp].credit_frag;

    if(OPAL_UNLIKELY(NULL == frag)) {
        frag = alloc_control_frag(openib_btl);
        frag->qp_idx = qp;
        endpoint->qps[qp].credit_frag = frag;
        /* set those once and forever */
        to_base_frag(frag)->base.order = mca_btl_openib_component.credits_qp;
        to_base_frag(frag)->base.des_cbfunc = mca_btl_openib_endpoint_credits;
        to_base_frag(frag)->base.des_cbdata = NULL;
        to_base_frag(frag)->base.des_flags |= MCA_BTL_DES_SEND_ALWAYS_CALLBACK;
        to_com_frag(frag)->endpoint = endpoint;
        frag->hdr->tag = MCA_BTL_TAG_IB;
        to_base_frag(frag)->segment.seg_len =
            sizeof(mca_btl_openib_rdma_credits_header_t);
    }

    assert(frag->qp_idx == qp);
    credits_hdr = (mca_btl_openib_rdma_credits_header_t*)
        to_base_frag(frag)->segment.seg_addr.pval;
    if(OPAL_SUCCESS == acquire_eager_rdma_send_credit(endpoint)) {
        do_rdma = true;
    } else {
        if(OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.cm_sent, 1) >
                (mca_btl_openib_component.qp_infos[qp].u.pp_qp.rd_rsv - 1)) {
            OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.cm_sent, -1);
            BTL_OPENIB_CREDITS_SEND_UNLOCK(endpoint, qp);
            return;
        }
     }

    BTL_OPENIB_GET_CREDITS(endpoint->qps[qp].u.pp_qp.rd_credits, frag->hdr->credits);

    frag->hdr->cm_seen = 0;
    BTL_OPENIB_GET_CREDITS(endpoint->qps[qp].u.pp_qp.cm_return, cm_return);
    if(cm_return > 255) {
        frag->hdr->cm_seen = 255;
        cm_return -= 255;
        OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.cm_return, cm_return);
    } else {
        frag->hdr->cm_seen = cm_return;
    }

    BTL_OPENIB_GET_CREDITS(endpoint->eager_rdma_local.credits, credits_hdr->rdma_credits);
    credits_hdr->qpn = qp;
    credits_hdr->control.type = MCA_BTL_OPENIB_CONTROL_CREDITS;

    if(endpoint->nbo)
         BTL_OPENIB_RDMA_CREDITS_HEADER_HTON(*credits_hdr);

    qp_reset_signal_count(endpoint, qp);
    if((rc = post_send(endpoint, frag, do_rdma, 1)) == 0)
        return;

    if(endpoint->nbo) {
        BTL_OPENIB_HEADER_NTOH(*frag->hdr);
        BTL_OPENIB_RDMA_CREDITS_HEADER_NTOH(*credits_hdr);
    }
    BTL_OPENIB_CREDITS_SEND_UNLOCK(endpoint, qp);
    OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.rd_credits,
            frag->hdr->credits);
    OPAL_THREAD_ADD32(&endpoint->eager_rdma_local.credits,
            credits_hdr->rdma_credits);
    if(do_rdma)
        OPAL_THREAD_ADD32(&endpoint->eager_rdma_remote.tokens, 1);
    else
        OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.cm_sent, -1);

    BTL_ERROR(("error posting send request errno %d says %s", rc,
                strerror(errno)));
}
Example #15
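/*
 * Leader-side handling of a client request: reserve the next slot in the
 * replicated log, copy the request into it, persist the record via
 * store_record(), RDMA-write the entry to every connected replica, then spin
 * on the entry's ack array until a quorum has acknowledged it before
 * advancing the commit point.
 */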
dare_log_entry_t* leader_handle_submit_req(struct consensus_component_t* comp, size_t data_size, void* data, uint8_t type, view_stamp* clt_id)
{
#ifdef MEASURE_LATENCY
        clock_handler c_k;
        clock_init(&c_k);
        clock_add(&c_k);
#endif


#ifdef USE_SPIN_LOCK
        pthread_spin_lock(&comp->spinlock);
#else
        pthread_mutex_lock(&comp->lock);
#endif

        view_stamp next = get_next_view_stamp(comp);

        if (type == P_TCP_CONNECT)
        {
            clt_id->view_id = next.view_id;
            clt_id->req_id = next.req_id;
        }

        db_key_type record_no = vstol(&next);

        comp->highest_seen_vs->req_id = comp->highest_seen_vs->req_id + 1;

        dare_log_entry_t *entry = log_add_new_entry(SRV_DATA->log);

        if (!log_fit_entry_header(SRV_DATA->log, SRV_DATA->log->end)) {
            SRV_DATA->log->end = 0;
        }

        SRV_DATA->log->tail = SRV_DATA->log->end;
        entry->data_size = data_size + 1;
        SRV_DATA->log->end += log_entry_len(entry);
        uint32_t offset = (uint32_t)(offsetof(dare_log_t, entries) + SRV_DATA->log->tail);

        dare_ib_ep_t *ep;
        uint32_t i, *send_count_ptr;
        int send_flags[MAX_SERVER_COUNT], poll_completion[MAX_SERVER_COUNT] = {0};
        for (i = 0; i < comp->group_size; i++) {
            ep = (dare_ib_ep_t*)SRV_DATA->config.servers[i].ep;
            if (i == *SRV_DATA->config.idx || 0 == ep->rc_connected)
                continue;
            send_count_ptr = &(ep->rc_ep.rc_qp.send_count);

            if((*send_count_ptr & S_DEPTH_) == 0)
                send_flags[i] = IBV_SEND_SIGNALED;
            else
                send_flags[i] = 0;

            if ((*send_count_ptr & S_DEPTH_) == S_DEPTH_)
                poll_completion[i] = 1;
            
            (*send_count_ptr)++;
        }

#ifdef USE_SPIN_LOCK
        pthread_spin_unlock(&comp->spinlock);
#else
        pthread_mutex_unlock(&comp->lock);
#endif

        entry->req_canbe_exed.view_id = comp->highest_committed_vs->view_id;
        entry->req_canbe_exed.req_id = comp->highest_committed_vs->req_id;
        
        if (data != NULL)
            memcpy(entry->data,data,data_size);

        entry->msg_vs = next;
        entry->node_id = *comp->node_id;
        entry->type = type;
        entry->clt_id.view_id = (type != P_NOP)?clt_id->view_id:0;
        entry->clt_id.req_id = (type != P_NOP)?clt_id->req_id:0;

        request_record* record_data = (request_record*)((char*)entry + offsetof(dare_log_entry_t, data_size));

        if(store_record(comp->db_ptr, sizeof(record_no), &record_no, REQ_RECORD_SIZE(record_data) - 1, record_data))
        {
            fprintf(stderr, "Can not save record from database.\n");
            goto handle_submit_req_exit;
        }

#ifdef MEASURE_LATENCY
        clock_add(&c_k);
#endif
        char* dummy = (char*)((char*)entry + log_entry_len(entry) - 1);
        *dummy = DUMMY_END;

        uint32_t my_id = *comp->node_id;
        uint64_t bit_map = (1<<my_id);
        rem_mem_t rm;
        memset(&rm, 0, sizeof(rem_mem_t));
        for (i = 0; i < comp->group_size; i++) {
            ep = (dare_ib_ep_t*)SRV_DATA->config.servers[i].ep;
            if (i == *SRV_DATA->config.idx || 0 == ep->rc_connected)
                continue;

            rm.raddr = ep->rc_ep.rmt_mr.raddr + offset;
            rm.rkey = ep->rc_ep.rmt_mr.rkey;

            post_send(i, entry, log_entry_len(entry), IBDEV->lcl_mr, IBV_WR_RDMA_WRITE, &rm, send_flags[i], poll_completion[i]);
        }

recheck:
        for (i = 0; i < MAX_SERVER_COUNT; i++) {
            if (entry->ack[i].msg_vs.view_id == next.view_id && entry->ack[i].msg_vs.req_id == next.req_id)
            {
                bit_map = bit_map | (1<<entry->ack[i].node_id);
            }
        }
        if (reached_quorum(bit_map, comp->group_size)) {
            //TODO: do we need the lock here?
            while (entry->msg_vs.req_id > comp->highest_committed_vs->req_id + 1);
            comp->highest_committed_vs->req_id = comp->highest_committed_vs->req_id + 1;
            
#ifdef MEASURE_LATENCY
            clock_add(&c_k);
            clock_display(comp->sys_log_file, &c_k);
#endif

        }else{
            goto recheck;
        }
handle_submit_req_exit:
    return entry;
}
Example #16
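/*
 * Follower loop: whenever this node is not the leader, watch the end of the
 * replicated log for a complete entry (marked by the DUMMY_END sentinel),
 * persist it, RDMA-write an accept_ack back to the entry's sender, and invoke
 * the commit callback for every request the leader has marked as executable.
 */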
void *handle_accept_req(void* arg)
{
    consensus_component* comp = arg;

    db_key_type start;
    db_key_type end;
    db_key_type index;
    
    dare_log_entry_t* entry;

    set_affinity(1);

    for (;;)
    {
        if (comp->cur_view->leader_id != *comp->node_id)
        {
            comp->uc(comp->up_para);

            entry = log_get_entry(SRV_DATA->log, &SRV_DATA->log->end);

            if (entry->data_size != 0)
            {
                char* dummy = (char*)((char*)entry + log_entry_len(entry) - 1);
                if (*dummy == DUMMY_END) // atomic operation
                {
#ifdef MEASURE_LATENCY
                    clock_handler c_k;
                    clock_init(&c_k);
                    clock_add(&c_k);
#endif
                    if(entry->msg_vs.view_id < comp->cur_view->view_id){
                    // TODO
                    //goto reloop;
                    }
                    // if this message is not from the current leader
                    if(entry->msg_vs.view_id == comp->cur_view->view_id && entry->node_id != comp->cur_view->leader_id){
                    // TODO
                    //goto reloop;
                    }

                    // update highest seen request
                    if(view_stamp_comp(&entry->msg_vs, comp->highest_seen_vs) > 0){
                        *(comp->highest_seen_vs) = entry->msg_vs;
                    }

                    db_key_type record_no = vstol(&entry->msg_vs);
                    // record the data persistently
                    request_record* record_data = (request_record*)((char*)entry + offsetof(dare_log_entry_t, data_size));

                    store_record(comp->db_ptr, sizeof(record_no), &record_no, REQ_RECORD_SIZE(record_data) - 1, record_data);

#ifdef MEASURE_LATENCY
                    clock_add(&c_k);
#endif
                    SRV_DATA->log->tail = SRV_DATA->log->end;
                    SRV_DATA->log->end += log_entry_len(entry);
                    uint32_t my_id = *comp->node_id;
                    uint32_t offset = (uint32_t)(offsetof(dare_log_t, entries) + SRV_DATA->log->tail + ACCEPT_ACK_SIZE * my_id);

                    accept_ack* reply = (accept_ack*)((char*)entry + ACCEPT_ACK_SIZE * my_id);
                    reply->node_id = my_id;
                    reply->msg_vs.view_id = entry->msg_vs.view_id;
                    reply->msg_vs.req_id = entry->msg_vs.req_id;
                    
                    if (entry->type == P_OUTPUT)
                    {
                        // up = get_mapping_fd() is defined in ev_mgr.c
                        int fd = comp->ug(entry->clt_id, comp->up_para);
                        // consider entry->data as a pointer.
                        uint64_t hash = get_output_hash(fd, *(long*)entry->data);
                        reply->hash = hash;    
                    }

                    rem_mem_t rm;
                    dare_ib_ep_t *ep = (dare_ib_ep_t*)SRV_DATA->config.servers[entry->node_id].ep;
                    memset(&rm, 0, sizeof(rem_mem_t));
                    uint32_t *send_count_ptr = &(ep->rc_ep.rc_qp.send_count);
                    int send_flags, poll_completion = 0;

                    if((*send_count_ptr & S_DEPTH_) == 0)
                        send_flags = IBV_SEND_SIGNALED;
                    else
                        send_flags = 0;

                    if ((*send_count_ptr & S_DEPTH_) == S_DEPTH_)
                        poll_completion = 1;

                    (*send_count_ptr)++;

                    rm.raddr = ep->rc_ep.rmt_mr.raddr + offset;
                    rm.rkey = ep->rc_ep.rmt_mr.rkey;

                    post_send(entry->node_id, reply, ACCEPT_ACK_SIZE, IBDEV->lcl_mr, IBV_WR_RDMA_WRITE, &rm, send_flags, poll_completion);

                    if(view_stamp_comp(&entry->req_canbe_exed, comp->highest_committed_vs) > 0)
                    {
                        start = vstol(comp->highest_committed_vs)+1;
                        end = vstol(&entry->req_canbe_exed);
                        for(index = start; index <= end; index++)
                        {
                            comp->ucb(index,comp->up_para);
                        }
                        *(comp->highest_committed_vs) = entry->req_canbe_exed;
                    }
#ifdef MEASURE_LATENCY
                    clock_add(&c_k);
                    clock_display(comp->sys_log_file, &c_k);
#endif
                }   
            }
        }
    }
}
Example #17
/* this function is called with endpoint->endpoint_lock held */
int mca_btl_wv_endpoint_post_send(mca_btl_wv_endpoint_t *endpoint,
        mca_btl_wv_send_frag_t *frag)
{
    mca_btl_wv_header_t *hdr = frag->hdr;
    mca_btl_base_descriptor_t *des = &to_base_frag(frag)->base;
    int qp, ib_rc;
    int32_t cm_return;
    bool do_rdma = false;
    size_t eager_limit;

    if(OPAL_LIKELY(des->order == MCA_BTL_NO_ORDER))
        des->order = frag->qp_idx;

    qp = des->order;

    if(acruire_wqe(endpoint, frag) != OMPI_SUCCESS)
        return OMPI_ERR_RESOURCE_BUSY;

    eager_limit = mca_btl_wv_component.eager_limit +
        sizeof(mca_btl_wv_header_coalesced_t) +
        sizeof(mca_btl_wv_control_header_t);
    if(des->des_src->seg_len + frag->coalesced_length <= eager_limit &&
            (des->des_flags & MCA_BTL_DES_FLAGS_PRIORITY)) {
        /* High priority frag. Try to send over eager RDMA */
        if(acquire_eager_rdma_send_credit(endpoint) == OMPI_SUCCESS)
            do_rdma = true;
    }

    if(!do_rdma && acquire_send_credit(endpoint, frag) != OMPI_SUCCESS) {
        qp_put_wqe(endpoint, qp);
        return OMPI_ERR_RESOURCE_BUSY;
    }

    BTL_WV_GET_CREDITS(endpoint->eager_rdma_local.credits, hdr->credits);
    if(hdr->credits)
        hdr->credits |= BTL_WV_RDMA_CREDITS_FLAG;

    if(!do_rdma) {
        if(BTL_WV_QP_TYPE_PP(qp) && 0 == hdr->credits) {
            BTL_WV_GET_CREDITS(endpoint->qps[qp].u.pp_qp.rd_credits, hdr->credits);
        }
    } else {
        hdr->credits |= (qp << 11);
    }

    BTL_WV_GET_CREDITS(endpoint->qps[qp].u.pp_qp.cm_return, cm_return);
    /* cm_seen is only 8 bits, but cm_return is 32 bits */
    if(cm_return > 255) {
        hdr->cm_seen = 255;
        cm_return -= 255;
        OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.cm_return, cm_return);
    } else {
        hdr->cm_seen = cm_return;
    }

    ib_rc = post_send(endpoint, frag, do_rdma);

    if(!ib_rc)
        return OMPI_SUCCESS;

    if(endpoint->nbo)
        BTL_WV_HEADER_NTOH(*hdr);

    if(BTL_WV_IS_RDMA_CREDITS(hdr->credits)) {
        OPAL_THREAD_ADD32(&endpoint->eager_rdma_local.credits,
                BTL_WV_CREDITS(hdr->credits));
    }

    qp_put_wqe(endpoint, qp);

    if(do_rdma) {
        OPAL_THREAD_ADD32(&endpoint->eager_rdma_remote.tokens, 1);
    } else {
        if(BTL_WV_QP_TYPE_PP(qp)) {
            OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.rd_credits,
                    hdr->credits);
            OPAL_THREAD_ADD32(&endpoint->qps[qp].u.pp_qp.sd_credits, 1);
        } else if BTL_WV_QP_TYPE_SRQ(qp){
            mca_btl_wv_module_t *wv_btl = endpoint->endpoint_btl;
            OPAL_THREAD_ADD32(&wv_btl->qps[qp].u.srq_qp.sd_credits, 1);
        }
    }
    BTL_ERROR(("error posting send request error %d: %s\n",
               ib_rc, strerror(ib_rc)));
    return OMPI_ERROR;
}
Example #18
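/*
 * Open the connection to the NeWS server, enable asynchronous notification on
 * its file descriptors, send the PostScript preamble and header to the
 * interpreter, and wait for acknowledgement; returns False if the connection
 * could not be set up.
 */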
Boolean initNeWS(void)
{
    static NeWStoken newstoken;

    /* now try to open the connection to the NeWS server */
    if (ps_open_PostScript() == (PSFILE *) NULL)
	return False;

    psio_sigpipe_handler_struct.sa_handler = psio_sigpipe_handler;
    sigemptyset(&psio_sigpipe_handler_struct.sa_mask);

#if !FLAKY_SIGPOLL
    if (fcntl(PostScript->file, F_SETOWN, getpid()) == -1)
	perror("xdvi: fcntl F_SETOWN");
    if (fcntl(PostScript->file, F_SETFL,
	      fcntl(PostScript->file, F_GETFL, 0) | FASYNC) == -1)
	perror("xdvi: fcntl F_SETFL");
#endif /* not FLAKY_SIGPOLL */
    if (PostScriptInput->file != PostScript->file) {
#if !FLAKY_SIGPOLL
	if (fcntl(PostScriptInput->file, F_SETOWN, getpid()) == -1)
	    perror("xdvi: fcntl F_SETOWN");
	if (fcntl(PostScriptInput->file, F_SETFL,
		  fcntl(PostScriptInput->file, F_GETFL, 0) | FASYNC) == -1)
	    perror("xdvi: fcntl F_SETFL");
#endif /* not FLAKY_SIGPOLL */
	NeWS_xout.xio_events &= ~XIO_IN;
	NeWS_xin.fd = PostScriptInput->file;
	set_io(&NeWS_xin);
    }
    NeWS_xout.fd = PostScript->file;
    set_io(&NeWS_xout);

    NeWS_active = False;
    NeWS_in_header = True;
    NeWS_ev_mask = NEWS_MASK_INIT;
    NeWS_pending = 1;

    ps_flush_PostScript();
    NeWS_send(str0, (sizeof str0) - 1);
    /* get xid of window, then make this window the NeWS canvas */
    (void)ps_token_from_xid(mane.win, &newstoken);
    if (newstoken != -1) {
	ps_setcanvas(newstoken);
	ps_flush_PostScript();

	NeWS_send(preamble, (sizeof preamble) - 1);
	NeWS_send(psheader, psheaderlen);
	NeWS_send(preamble2, (sizeof preamble2) - 1);
	NeWS_in_header = False;
	post_send();
	waitack();
    }

    if (NeWS_destroyed)
	return False;

    /* success */

    NeWS_mag = NeWS_shrink = -1;
    NeWS_page_w = globals.page.w;
    NeWS_page_h = globals.page.h;

    psp = news_procs;
    if (!resource.postscript)
	toggleNeWS(0);	/* if we got a 'v' already */

    return True;
}