/* Convert a llogd_body between on-wire and host byte order (the swab
 * macros are their own inverse, so one routine serves both directions).
 * The body is printed before and after for debug tracing. */
void lustre_swab_llogd_body(struct llogd_body *d)
{
	print_llogd_body(d);

	lustre_swab_llog_id(&d->lgd_logid);
	__swab32s(&d->lgd_ctxt_idx);
	__swab32s(&d->lgd_llh_flags);
	__swab32s(&d->lgd_index);
	__swab32s(&d->lgd_saved_index);
	__swab32s(&d->lgd_len);
	__swab64s(&d->lgd_cur_offset);

	print_llogd_body(d);
}
/**
 * Swab, if needed, LOA (for OST-object only) structure with LMA EA and PFID EA
 * combined together are stored on-disk in little-endian order.
 *
 * \param[in] loa	- the pointer to the LOA structure to be swabbed.
 * \param[in] to_cpu	- to indicate swab for CPU order or not.
 */
void lustre_loa_swab(struct lustre_ost_attrs *loa, bool to_cpu)
{
	struct lustre_mdt_attrs *lma = &loa->loa_lma;
#ifdef __BIG_ENDIAN
	/* Which optional LOA fields are present is recorded in lma_compat.
	 * When converting CPU order -> disk order, lma_compat is still
	 * readable here, so capture it BEFORE lustre_lma_swab() converts it
	 * to little-endian. */
	__u32 compat = lma->lma_compat;
#endif

	lustre_lma_swab(lma);
#ifdef __BIG_ENDIAN
	if (to_cpu)
		/* Converting disk order -> CPU order: lma_compat only became
		 * readable after the swab above, so re-read it now. */
		compat = lma->lma_compat;

	/* Only swab the optional trailing fields that are actually present,
	 * as indicated by the compat flags. */
	if (compat & LMAC_STRIPE_INFO) {
		lustre_swab_lu_fid(&loa->loa_parent_fid);
		__swab32s(&loa->loa_stripe_size);
	}
	if (compat & LMAC_COMP_INFO) {
		__swab32s(&loa->loa_comp_id);
		__swab64s(&loa->loa_comp_start);
		__swab64s(&loa->loa_comp_end);
	}
#endif
}
static int out_write(struct tgt_session_info *tsi) { struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env); struct object_update *update = tti->tti_u.update.tti_update; struct dt_object *obj = tti->tti_u.update.tti_dt_object; struct lu_buf *lbuf = &tti->tti_buf; char *buf; __u64 *tmp; size_t size = 0; size_t buf_len = 0; loff_t pos; int rc; ENTRY; buf = object_update_param_get(update, 0, &buf_len); if (IS_ERR(buf) || buf_len == 0) { CERROR("%s: empty buf for xattr set: rc = %ld\n", tgt_name(tsi->tsi_tgt), PTR_ERR(buf)); RETURN(PTR_ERR(buf)); } lbuf->lb_buf = buf; lbuf->lb_len = buf_len; tmp = object_update_param_get(update, 1, &size); if (IS_ERR(tmp) || size != sizeof(*tmp)) { CERROR("%s: empty or wrong size %zu pos: rc = %ld\n", tgt_name(tsi->tsi_tgt), size, PTR_ERR(tmp)); RETURN(PTR_ERR(tmp)); } if (ptlrpc_req_need_swab(tsi->tsi_pill->rc_req)) __swab64s(tmp); pos = *tmp; rc = out_tx_write(tsi->tsi_env, obj, lbuf, pos, &tti->tti_tea, tti->tti_tea.ta_handle, tti->tti_u.update.tti_update_reply, tti->tti_u.update.tti_update_reply_index); RETURN(rc); }
void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg) { int i; __swab32s(&lcfg->lcfg_version); if (lcfg->lcfg_version != LUSTRE_CFG_VERSION) { CERROR("not swabbing lustre_cfg version %#x (expecting %#x)\n", lcfg->lcfg_version, LUSTRE_CFG_VERSION); return; } __swab32s(&lcfg->lcfg_command); __swab32s(&lcfg->lcfg_num); __swab32s(&lcfg->lcfg_flags); __swab64s(&lcfg->lcfg_nid); __swab32s(&lcfg->lcfg_bufcount); for (i = 0; i < lcfg->lcfg_bufcount && i < LUSTRE_CFG_MAX_BUFCOUNT; i++) __swab32s(&lcfg->lcfg_buflens[i]); print_lustre_cfg(lcfg); }
static int out_write(struct tgt_session_info *tsi) { struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env); struct object_update *update = tti->tti_u.update.tti_update; struct dt_object *obj = tti->tti_u.update.tti_dt_object; struct lu_buf *lbuf = &tti->tti_buf; char *buf; char *tmp; int buf_len = 0; loff_t pos; int rc; ENTRY; buf = object_update_param_get(update, 0, &buf_len); if (buf == NULL || buf_len == 0) { CERROR("%s: empty buf for xattr set: rc = %d\n", tgt_name(tsi->tsi_tgt), -EPROTO); RETURN(err_serious(-EPROTO)); } lbuf->lb_buf = buf; lbuf->lb_len = buf_len; tmp = (char *)object_update_param_get(update, 1, NULL); if (tmp == NULL) { CERROR("%s: empty flag for xattr set: rc = %d\n", tgt_name(tsi->tsi_tgt), -EPROTO); RETURN(err_serious(-EPROTO)); } if (ptlrpc_req_need_swab(tsi->tsi_pill->rc_req)) __swab64s((__u64 *)tmp); pos = *(loff_t *)tmp; rc = out_tx_write(tsi->tsi_env, obj, lbuf, pos, &tti->tti_tea, tti->tti_u.update.tti_update_reply, tti->tti_u.update.tti_update_reply_index); RETURN(rc); }
/* Byte-swap a llog_logid in place: the embedded object id/sequence pair
 * plus the generation counter. */
void lustre_swab_llog_id(struct llog_logid *log_id)
{
	__swab64s(&log_id->lgl_oi.oi.oi_id);
	__swab64s(&log_id->lgl_oi.oi.oi_seq);
	__swab32s(&log_id->lgl_ogen);
}
/* Byte-swap a lu_fid in place (sequence, object id, version). */
void lustre_swab_lu_fid(struct lu_fid *fid)
{
	__swab64s(&fid->f_seq);
	__swab32s(&fid->f_oid);
	__swab32s(&fid->f_ver);
}
/* Byte-swap a generic llog record in place.
 *
 * The common header is swabbed first; the record body is then swabbed
 * according to lrh_type.  Most record types end in a llog_rec_tail, which
 * is collected in 'tail' and swabbed at the end; types with no tail (or
 * whose payload is swabbed elsewhere) leave it NULL. */
void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
{
	struct llog_rec_tail *tail = NULL;

	__swab32s(&rec->lrh_len);
	__swab32s(&rec->lrh_index);
	__swab32s(&rec->lrh_type);
	__swab32s(&rec->lrh_id);

	switch (rec->lrh_type) {
	case OST_SZ_REC:
	{
		struct llog_size_change_rec *lsc =
			(struct llog_size_change_rec *)rec;

		lustre_swab_ll_fid(&lsc->lsc_fid);
		__swab32s(&lsc->lsc_ioepoch);
		tail = &lsc->lsc_tail;
		break;
	}
	case MDS_UNLINK_REC:
	{
		struct llog_unlink_rec *lur =
			(struct llog_unlink_rec *)rec;

		__swab64s(&lur->lur_oid);
		__swab32s(&lur->lur_oseq);
		__swab32s(&lur->lur_count);
		tail = &lur->lur_tail;
		break;
	}
	case MDS_UNLINK64_REC:
	{
		struct llog_unlink64_rec *lur =
			(struct llog_unlink64_rec *)rec;

		lustre_swab_lu_fid(&lur->lur_fid);
		__swab32s(&lur->lur_count);
		tail = &lur->lur_tail;
		break;
	}
	case CHANGELOG_REC:
	{
		struct llog_changelog_rec *cr = (struct llog_changelog_rec *)rec;

		__swab16s(&cr->cr.cr_namelen);
		__swab16s(&cr->cr.cr_flags);
		__swab32s(&cr->cr.cr_type);
		__swab64s(&cr->cr.cr_index);
		__swab64s(&cr->cr.cr_prev);
		__swab64s(&cr->cr.cr_time);
		lustre_swab_lu_fid(&cr->cr.cr_tfid);
		lustre_swab_lu_fid(&cr->cr.cr_pfid);
		/* Extended changelog records carry two extra FIDs and a
		 * differently-placed tail. */
		if (CHANGELOG_REC_EXTENDED(&cr->cr)) {
			struct llog_changelog_ext_rec *ext =
				(struct llog_changelog_ext_rec *)rec;

			lustre_swab_lu_fid(&ext->cr.cr_sfid);
			lustre_swab_lu_fid(&ext->cr.cr_spfid);
			tail = &ext->cr_tail;
		} else {
			tail = &cr->cr_tail;
		}
		break;
	}
	case CHANGELOG_USER_REC:
	{
		struct llog_changelog_user_rec *cur =
			(struct llog_changelog_user_rec *)rec;

		__swab32s(&cur->cur_id);
		__swab64s(&cur->cur_endrec);
		tail = &cur->cur_tail;
		break;
	}
	case HSM_AGENT_REC:
	{
		struct llog_agent_req_rec *arr =
			(struct llog_agent_req_rec *)rec;

		__swab32s(&arr->arr_hai.hai_len);
		__swab32s(&arr->arr_hai.hai_action);
		lustre_swab_lu_fid(&arr->arr_hai.hai_fid);
		lustre_swab_lu_fid(&arr->arr_hai.hai_dfid);
		__swab64s(&arr->arr_hai.hai_cookie);
		__swab64s(&arr->arr_hai.hai_extent.offset);
		__swab64s(&arr->arr_hai.hai_extent.length);
		__swab64s(&arr->arr_hai.hai_gid);
		/* no swabbing for opaque data */
		/* hai_data[0]; */
		break;
	}
	case MDS_SETATTR64_REC:
	{
		struct llog_setattr64_rec *lsr =
			(struct llog_setattr64_rec *)rec;

		lustre_swab_ost_id(&lsr->lsr_oi);
		__swab32s(&lsr->lsr_uid);
		__swab32s(&lsr->lsr_uid_h);
		__swab32s(&lsr->lsr_gid);
		__swab32s(&lsr->lsr_gid_h);
		tail = &lsr->lsr_tail;
		break;
	}
	case OBD_CFG_REC:
		/* these are swabbed as they are consumed */
		break;
	case LLOG_HDR_MAGIC:
	{
		struct llog_log_hdr *llh = (struct llog_log_hdr *)rec;

		__swab64s(&llh->llh_timestamp);
		__swab32s(&llh->llh_count);
		__swab32s(&llh->llh_bitmap_offset);
		__swab32s(&llh->llh_flags);
		__swab32s(&llh->llh_size);
		__swab32s(&llh->llh_cat_idx);
		tail = &llh->llh_tail;
		break;
	}
	case LLOG_LOGID_MAGIC:
	{
		struct llog_logid_rec *lid = (struct llog_logid_rec *)rec;

		lustre_swab_llog_id(&lid->lid_id);
		tail = &lid->lid_tail;
		break;
	}
	case LLOG_GEN_REC:
	{
		struct llog_gen_rec *lgr = (struct llog_gen_rec *)rec;

		__swab64s(&lgr->lgr_gen.mnt_cnt);
		__swab64s(&lgr->lgr_gen.conn_cnt);
		tail = &lgr->lgr_tail;
		break;
	}
	case LLOG_PAD_MAGIC:
		/* padding record: header/tail only, nothing more to swab */
		break;
	default:
		CERROR("Unknown llog rec type %#x swabbing rec %p\n",
		       rec->lrh_type, rec);
	}

	if (tail) {
		__swab32s(&tail->lrt_len);
		__swab32s(&tail->lrt_index);
	}
}
/* Byte-swap a legacy ll_fid in place (id, generation, file type). */
void lustre_swab_ll_fid(struct ll_fid *fid)
{
	__swab64s(&fid->id);
	__swab32s(&fid->generation);
	__swab32s(&fid->f_type);
}
/* Process an incoming connection on the acceptor socket.
 *
 * 'magic' has already been read from the socket by the caller.  This
 * routine validates protocol magic and version (replying with our own
 * magic/version to unrecognised-but-future-compatible peers), reads the
 * rest of the connection request, resolves the target NI from the
 * requested NID, and hands the socket to that NI's LND.
 *
 * Returns 0 on success; -EPROTO for protocol mismatches, -EIO on socket
 * read errors, -EPERM when no suitable NI will take the connection. */
int lnet_accept(socket_t *sock, __u32 magic)
{
	lnet_acceptor_connreq_t cr;
	__u32 peer_ip;
	int peer_port;
	int rc;
	int flip;
	lnet_ni_t *ni;
	char *str;

	LASSERT(sizeof(cr) <= 16);	/* not too big for the stack */

	rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
	LASSERT(rc == 0);		/* we succeeded before */

	if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
		if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
			/* future version compatibility!
			 * When LNET unifies protocols over all LNDs, the first
			 * thing sent will be a version query. I send back
			 * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */
			memset(&cr, 0, sizeof(cr));
			cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
			cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
			rc = libcfs_sock_write(sock, &cr, sizeof(cr),
					       accept_timeout);
			if (rc != 0)
				CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
				       &peer_ip, rc);
			return -EPROTO;
		}

		if (magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC))
			str = "'old' socknal/tcpnal";
		else if (lnet_accept_magic(magic, LNET_PROTO_RA_MAGIC))
			str = "'old' ranal";
		else
			str = "unrecognised";

		LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
				   &peer_ip, magic, str);
		return -EPROTO;
	}

	/* Peer sent acceptor magic in the opposite byte order: swab every
	 * subsequent field we read from it. */
	flip = (magic != LNET_PROTO_ACCEPTOR_MAGIC);

	rc = libcfs_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version),
			      accept_timeout);
	if (rc != 0) {
		CERROR("Error %d reading connection request version from %pI4h\n",
		       rc, &peer_ip);
		return -EIO;
	}

	if (flip)
		__swab32s(&cr.acr_version);

	if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) {
		/* future version compatibility!
		 * An acceptor-specific protocol rev will first send a version
		 * query. I send back my current version to tell her I'm
		 * "old". */
		int peer_version = cr.acr_version;

		memset(&cr, 0, sizeof(cr));
		cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
		cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
		rc = libcfs_sock_write(sock, &cr, sizeof(cr), accept_timeout);
		if (rc != 0)
			CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
			       peer_version, &peer_ip, rc);
		return -EPROTO;
	}

	/* Read the remainder of the request (everything after acr_version). */
	rc = libcfs_sock_read(sock, &cr.acr_nid,
			      sizeof(cr) -
			      offsetof(lnet_acceptor_connreq_t, acr_nid),
			      accept_timeout);
	if (rc != 0) {
		CERROR("Error %d reading connection request from %pI4h\n",
		       rc, &peer_ip);
		return -EIO;
	}

	if (flip)
		__swab64s(&cr.acr_nid);

	ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid));
	if (ni == NULL ||			/* no matching net */
	    ni->ni_nid != cr.acr_nid) {		/* right NET, wrong NID! */
		if (ni != NULL)
			lnet_ni_decref(ni);
		LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
				   &peer_ip, libcfs_nid2str(cr.acr_nid));
		return -EPERM;
	}

	if (ni->ni_lnd->lnd_accept == NULL) {
		/* This catches a request for the loopback LND */
		lnet_ni_decref(ni);
		/* fix typo in console message: "doesn not" -> "does not" */
		LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI does not accept IP connections\n",
				   &peer_ip, libcfs_nid2str(cr.acr_nid));
		return -EPERM;
	}

	CDEBUG(D_NET, "Accept %s from %pI4h\n",
	       libcfs_nid2str(cr.acr_nid), &peer_ip);

	rc = ni->ni_lnd->lnd_accept(ni, sock);

	lnet_ni_decref(ni);
	return rc;
}
/* Byte-swap a generic llog record in place.
 *
 * The common header is swabbed first; the record body is then swabbed
 * according to lrh_type.  Most record types end in a llog_rec_tail, which
 * is collected in 'tail' and swabbed last.  For variable-length records
 * (CHANGELOG_REC, UPDATE_REC) the tail location is computed at runtime
 * from the swabbed length fields. */
void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
{
	struct llog_rec_tail *tail = NULL;

	__swab32s(&rec->lrh_len);
	__swab32s(&rec->lrh_index);
	__swab32s(&rec->lrh_type);
	__swab32s(&rec->lrh_id);

	switch (rec->lrh_type) {
	case OST_SZ_REC:
	{
		struct llog_size_change_rec *lsc =
			(struct llog_size_change_rec *)rec;

		lustre_swab_ll_fid(&lsc->lsc_fid);
		__swab32s(&lsc->lsc_ioepoch);
		tail = &lsc->lsc_tail;
		break;
	}
	case MDS_UNLINK_REC:
	{
		struct llog_unlink_rec *lur =
			(struct llog_unlink_rec *)rec;

		__swab64s(&lur->lur_oid);
		__swab32s(&lur->lur_oseq);
		__swab32s(&lur->lur_count);
		tail = &lur->lur_tail;
		break;
	}
	case MDS_UNLINK64_REC:
	{
		struct llog_unlink64_rec *lur =
			(struct llog_unlink64_rec *)rec;

		lustre_swab_lu_fid(&lur->lur_fid);
		__swab32s(&lur->lur_count);
		tail = &lur->lur_tail;
		break;
	}
	case CHANGELOG_REC:
	{
		struct llog_changelog_rec *cr =
			(struct llog_changelog_rec *)rec;

		__swab16s(&cr->cr.cr_namelen);
		__swab16s(&cr->cr.cr_flags);
		__swab32s(&cr->cr.cr_type);
		__swab64s(&cr->cr.cr_index);
		__swab64s(&cr->cr.cr_prev);
		__swab64s(&cr->cr.cr_time);
		lustre_swab_lu_fid(&cr->cr.cr_tfid);
		lustre_swab_lu_fid(&cr->cr.cr_pfid);
		/* Rename records carry an extension with two extra FIDs. */
		if (cr->cr.cr_flags & CLF_RENAME) {
			struct changelog_ext_rename *rnm =
				changelog_rec_rename(&cr->cr);

			lustre_swab_lu_fid(&rnm->cr_sfid);
			lustre_swab_lu_fid(&rnm->cr_spfid);
		}
		/* Because the tail follows a variable-length structure we need
		 * to compute its location at runtime */
		tail = (struct llog_rec_tail *)((char *)&cr->cr +
						changelog_rec_size(&cr->cr) +
						cr->cr.cr_namelen);
		break;
	}
	case CHANGELOG_USER_REC:
	{
		struct llog_changelog_user_rec *cur =
			(struct llog_changelog_user_rec *)rec;

		__swab32s(&cur->cur_id);
		__swab64s(&cur->cur_endrec);
		tail = &cur->cur_tail;
		break;
	}
	case HSM_AGENT_REC:
	{
		struct llog_agent_req_rec *arr =
			(struct llog_agent_req_rec *)rec;

		__swab32s(&arr->arr_hai.hai_len);
		__swab32s(&arr->arr_hai.hai_action);
		lustre_swab_lu_fid(&arr->arr_hai.hai_fid);
		lustre_swab_lu_fid(&arr->arr_hai.hai_dfid);
		__swab64s(&arr->arr_hai.hai_cookie);
		__swab64s(&arr->arr_hai.hai_extent.offset);
		__swab64s(&arr->arr_hai.hai_extent.length);
		__swab64s(&arr->arr_hai.hai_gid);
		/* no swabbing for opaque data */
		/* hai_data[0]; */
		break;
	}
	case MDS_SETATTR64_REC:
	{
		struct llog_setattr64_rec *lsr =
			(struct llog_setattr64_rec *)rec;

		lustre_swab_ost_id(&lsr->lsr_oi);
		__swab32s(&lsr->lsr_uid);
		__swab32s(&lsr->lsr_uid_h);
		__swab32s(&lsr->lsr_gid);
		__swab32s(&lsr->lsr_gid_h);
		__swab64s(&lsr->lsr_valid);
		tail = &lsr->lsr_tail;
		break;
	}
	case OBD_CFG_REC:
		/* these are swabbed as they are consumed */
		break;
	case LLOG_HDR_MAGIC:
	{
		struct llog_log_hdr *llh = (struct llog_log_hdr *)rec;

		__swab64s(&llh->llh_timestamp);
		__swab32s(&llh->llh_count);
		__swab32s(&llh->llh_bitmap_offset);
		__swab32s(&llh->llh_flags);
		__swab32s(&llh->llh_size);
		__swab32s(&llh->llh_cat_idx);
		/* Header tail sits at the end of the (variable-size)
		 * header block. */
		tail = LLOG_HDR_TAIL(llh);
		break;
	}
	case LLOG_LOGID_MAGIC:
	{
		struct llog_logid_rec *lid = (struct llog_logid_rec *)rec;

		lustre_swab_llog_id(&lid->lid_id);
		tail = &lid->lid_tail;
		break;
	}
	case LLOG_GEN_REC:
	{
		struct llog_gen_rec *lgr = (struct llog_gen_rec *)rec;

		__swab64s(&lgr->lgr_gen.mnt_cnt);
		__swab64s(&lgr->lgr_gen.conn_cnt);
		tail = &lgr->lgr_tail;
		break;
	}
	case LLOG_PAD_MAGIC:
		/* padding record: header/tail only, nothing more to swab */
		break;
	case UPDATE_REC:
	{
		struct llog_update_record *lur =
			(struct llog_update_record *)rec;
		struct update_records *record = &lur->lur_update_rec;

		__swab32s(&record->ur_flags);
		__swab64s(&record->ur_batchid);
		__swab64s(&record->ur_master_transno);
		__swab32s(&record->ur_param_count);
		__swab32s(&record->ur_update_count);
		lustre_swab_update_ops(&record->ur_ops,
				       record->ur_update_count);
		/* Compute tail location. */
		tail = (struct llog_rec_tail *)((char *)record +
						update_records_size(record));
		break;
	}
	default:
		CERROR("Unknown llog rec type %#x swabbing rec %p\n",
		       rec->lrh_type, rec);
	}

	if (tail) {
		__swab32s(&tail->lrt_len);
		__swab32s(&tail->lrt_index);
	}
}
/* Switch on rx_state.
 * Return 0 on success, else return <0
 * Always set cont_flag: 1 if we're ready to continue reading, else 0
 *
 * Drives the hello-handshake receive state machine for 'conn': detect
 * protocol magic (and byte order), validate the version, swab the hello
 * body if the peer is opposite-endian, then hand off to the active or
 * passive hello completion routine. */
int usocklnd_read_hello(usock_conn_t *conn, int *cont_flag)
{
	int rc = 0;
	ksock_hello_msg_t *hello = conn->uc_rx_hello;

	*cont_flag = 0;
	/* smth. new emerged in hello - let's process it */
	switch (conn->uc_rx_state) {
	case UC_RX_HELLO_MAGIC:
		/* Magic also tells us the peer's byte order. */
		if (hello->kshm_magic == LNET_PROTO_MAGIC)
			conn->uc_flip = 0;
		else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
			conn->uc_flip = 1;
		else
			return -EPROTO;

		usocklnd_rx_helloversion_state_transition(conn);
		*cont_flag = 1;
		break;

	case UC_RX_HELLO_VERSION:
		if ((!conn->uc_flip &&
		     (hello->kshm_version != KSOCK_PROTO_V2)) ||
		    (conn->uc_flip &&
		     (hello->kshm_version != __swab32(KSOCK_PROTO_V2))))
			return -EPROTO;

		usocklnd_rx_hellobody_state_transition(conn);
		*cont_flag = 1;
		break;

	case UC_RX_HELLO_BODY:
		if (conn->uc_flip) {
			/* NB: the original code redeclared an identical
			 * 'hello' local here, shadowing the outer one;
			 * the outer variable is used directly instead. */
			__swab32s(&hello->kshm_src_pid);
			__swab64s(&hello->kshm_src_nid);
			__swab32s(&hello->kshm_dst_pid);
			__swab64s(&hello->kshm_dst_nid);
			__swab64s(&hello->kshm_src_incarnation);
			__swab64s(&hello->kshm_dst_incarnation);
			__swab32s(&hello->kshm_ctype);
			__swab32s(&hello->kshm_nips);
		}

		if (conn->uc_rx_hello->kshm_nips > LNET_MAX_INTERFACES) {
			CERROR("Bad nips %d from ip %u.%u.%u.%u port %d\n",
			       conn->uc_rx_hello->kshm_nips,
			       HIPQUAD(conn->uc_peer_ip), conn->uc_peer_port);
			return -EPROTO;
		}

		if (conn->uc_rx_hello->kshm_nips) {
			usocklnd_rx_helloIPs_state_transition(conn);
			*cont_flag = 1;
			break;
		}
		/* fall through */
	case UC_RX_HELLO_IPS:
		if (conn->uc_activeflag == 1) /* active conn */
			rc = usocklnd_activeconn_hellorecv(conn);
		else                          /* passive conn */
			rc = usocklnd_passiveconn_hellorecv(conn);

		break;

	default:
		LBUG(); /* unknown state */
	}

	return rc;
}
/* Switch on rx_state.
 * Return 0 on success, 1 if whole packet is read, else return <0
 * Always set cont_flag: 1 if we're ready to continue reading, else 0
 * NB: If whole packet is read, cont_flag will be set to zero to take
 * care of fairness */
int usocklnd_read_msg(usock_conn_t *conn, int *cont_flag)
{
	int rc = 0;
	__u64 cookie;

	*cont_flag = 0;
	/* smth. new emerged in RX part - let's process it */
	switch (conn->uc_rx_state) {
	case UC_RX_KSM_HEADER:
		/* uc_flip was decided during the hello handshake; swab the
		 * just-received ksock message header if needed. */
		if (conn->uc_flip) {
			__swab32s(&conn->uc_rx_msg.ksm_type);
			__swab32s(&conn->uc_rx_msg.ksm_csum);
			__swab64s(&conn->uc_rx_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->uc_rx_msg.ksm_zc_cookies[1]);
		}

		/* we never send packets for which zc-acking is required */
		if (conn->uc_rx_msg.ksm_type != KSOCK_MSG_LNET ||
		    conn->uc_rx_msg.ksm_zc_cookies[1] != 0) {
			conn->uc_errored = 1;
			return -EPROTO;
		}

		/* zc_req will be processed later, when lnet payload
		 * will be received */
		usocklnd_rx_lnethdr_state_transition(conn);
		*cont_flag = 1;
		break;

	case UC_RX_LNET_HEADER:
		if (the_lnet.ln_pid & LNET_PID_USERFLAG) {
			/* replace dest_nid,pid (ksocknal sets its own) */
			conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr.dest_nid =
				cpu_to_le64(conn->uc_peer->up_ni->ni_nid);
			conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr.dest_pid =
				cpu_to_le32(the_lnet.ln_pid);

		} else if (conn->uc_peer->up_peerid.pid & LNET_PID_USERFLAG) {
			/* Userspace peer */
			lnet_process_id_t *id = &conn->uc_peer->up_peerid;
			lnet_hdr_t *lhdr =
				&conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr;

			/* Substitute process ID assigned at connection time */
			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->uc_rx_state = UC_RX_PARSE;
		usocklnd_conn_addref(conn); /* ++ref while parsing */

		rc = lnet_parse(conn->uc_peer->up_ni,
				&conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->uc_peerid.nid, conn, 0);

		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			conn->uc_errored = 1;
			usocklnd_conn_decref(conn);
			return -EPROTO;
		}

		/* Race with usocklnd_recv() is possible */
		pthread_mutex_lock(&conn->uc_lock);
		LASSERT(conn->uc_rx_state == UC_RX_PARSE ||
			conn->uc_rx_state == UC_RX_LNET_PAYLOAD);

		/* check whether usocklnd_recv() got called */
		if (conn->uc_rx_state == UC_RX_LNET_PAYLOAD)
			*cont_flag = 1;
		pthread_mutex_unlock(&conn->uc_lock);
		break;

	case UC_RX_PARSE:
		LBUG(); /* it's error to be here, because this special
			 * case is handled by caller */
		break;

	case UC_RX_PARSE_WAIT:
		LBUG(); /* it's error to be here, because the conn
			 * shouldn't wait for POLLIN event in this
			 * state */
		break;

	case UC_RX_LNET_PAYLOAD:
		/* payload all received */
		lnet_finalize(conn->uc_peer->up_ni, conn->uc_rx_lnetmsg, 0);

		/* A non-zero cookie means the peer asked for a zero-copy
		 * ack on this message. */
		cookie = conn->uc_rx_msg.ksm_zc_cookies[0];
		if (cookie != 0)
			rc = usocklnd_handle_zc_req(conn->uc_peer, cookie);

		if (rc != 0) {
			/* change state not to finalize twice */
			conn->uc_rx_state = UC_RX_KSM_HEADER;
			return -EPROTO;
		}
		/* Fall through */

	case UC_RX_SKIPPING:
		if (conn->uc_rx_nob_left != 0) {
			usocklnd_rx_skipping_state_transition(conn);
			*cont_flag = 1;
		} else {
			usocklnd_rx_ksmhdr_state_transition(conn);
			rc = 1; /* whole packet is read */
		}
		break;

	default:
		LBUG(); /* unknown state */
	}

	return rc;
}