/* Byte-swap every field of an lu_seq_range in place. */
void lustre_swab_lu_seq_range(struct lu_seq_range *range)
{
	/* 64-bit sequence window boundaries */
	__swab64s(&range->lsr_start);
	__swab64s(&range->lsr_end);
	/* 32-bit index and flags */
	__swab32s(&range->lsr_index);
	__swab32s(&range->lsr_flags);
}
/**
 * Swab, if needed, LMA structure which is stored on-disk in little-endian
 * order.
 *
 * \param lma - is a pointer to the LMA structure to be swabbed.
 */
void lustre_lma_swab(struct lustre_mdt_attrs *lma)
{
	/* LMA is little-endian on disk; only big-endian hosts must swab. */
#ifdef __BIG_ENDIAN
	__swab32s(&lma->lma_compat);
	__swab32s(&lma->lma_incompat);
	lustre_swab_lu_fid(&lma->lma_self_fid);
#endif
}
/**
 * Swab, if needed, LMA structure which is stored on-disk in little-endian
 * order.
 *
 * \param lma - is a pointer to the LMA structure to be swabbed.
 */
void lustre_lma_swab(struct lustre_mdt_attrs *lma)
{
	/* Use LUSTRE_MSG_MAGIC to detect local endianess. */
	if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
		__swab32s(&lma->lma_compat);
		__swab32s(&lma->lma_incompat);
		lustre_swab_lu_fid(&lma->lma_self_fid);
	}
}
/* Fix: removed stray ';' that followed the function body — an empty
 * file-scope declaration, rejected by -Wpedantic/ISO C. */
/**
 * Swab, if needed, HSM structure which is stored on-disk in little-endian
 * order.
 *
 * \param attrs - is a pointer to the HSM structure to be swabbed.
 */
void lustre_hsm_swab(struct hsm_attrs *attrs)
{
	/* On-disk format is little-endian; only big-endian hosts swab. */
#ifdef __BIG_ENDIAN
	__swab32s(&attrs->hsm_compat);
	__swab32s(&attrs->hsm_flags);
	__swab64s(&attrs->hsm_arch_id);
	__swab64s(&attrs->hsm_arch_ver);
#endif
}
/**
 * Swab, if needed, HSM structure which is stored on-disk in little-endian
 * order.
 *
 * \param attrs - is a pointer to the HSM structure to be swabbed.
 */
void lustre_hsm_swab(struct hsm_attrs *attrs)
{
	/* Use LUSTRE_MSG_MAGIC to detect local endianess. */
	if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
		__swab32s(&attrs->hsm_compat);
		__swab32s(&attrs->hsm_flags);
		__swab64s(&attrs->hsm_arch_id);
		__swab64s(&attrs->hsm_arch_ver);
	}
}
/* Fix: removed stray ';' that followed the function body — an empty
 * file-scope declaration, rejected by -Wpedantic/ISO C. */
/**
 * Swab, if needed, SOM structure which is stored on-disk in little-endian
 * order.
 *
 * \param attrs - is a pointer to the SOM structure to be swabbed.
 */
void lustre_som_swab(struct som_attrs *attrs)
{
	/* Use LUSTRE_MSG_MAGIC to detect local endianess. */
	if (LUSTRE_MSG_MAGIC != cpu_to_le32(LUSTRE_MSG_MAGIC)) {
		__swab32s(&attrs->som_compat);
		__swab32s(&attrs->som_incompat);
		__swab64s(&attrs->som_ioepoch);
		__swab64s(&attrs->som_size);
		__swab64s(&attrs->som_blocks);
		__swab64s(&attrs->som_mountid);
	}
}
/* Fix: removed stray ';' that followed the function body — an empty
 * file-scope declaration, rejected by -Wpedantic/ISO C. */
/* Byte-swap an llogd_body in place; the record is dumped both before
 * and after swabbing for debugging. */
void lustre_swab_llogd_body(struct llogd_body *d)
{
	print_llogd_body(d);
	lustre_swab_llog_id(&d->lgd_logid);
	__swab32s(&d->lgd_ctxt_idx);
	__swab32s(&d->lgd_llh_flags);
	__swab32s(&d->lgd_index);
	__swab32s(&d->lgd_saved_index);
	__swab32s(&d->lgd_len);
	__swab64s(&d->lgd_cur_offset);
	print_llogd_body(d);
}
/*
 * Build and send the WSPI init command to switch the wl12xx chip into
 * WSPI mode.  NOTE(review): the return value of spi_sync() is ignored;
 * confirm callers tolerate a silently failed init.
 */
static void wl12xx_spi_init(struct device *child)
{
	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
	struct spi_transfer t;
	struct spi_message m;
	u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);

	if (!cmd) {
		dev_err(child->parent,
			"could not allocate cmd for spi init\n");
		return;
	}

	memset(&t, 0, sizeof(t));
	spi_message_init(&m);

	/*
	 * Set WSPI_INIT_COMMAND
	 * the data is being sent from the MSB to LSB
	 */
	cmd[0] = 0xff;
	cmd[1] = 0xff;
	cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
	cmd[3] = 0;
	cmd[4] = 0;
	cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
	cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;

	cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
		| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;

	if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
		cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
	else
		cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;

	/* CRC over the payload bytes (cmd[2..]), plus the end marker */
	cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;

	/*
	 * The above is the logical order; it must actually be stored
	 * in the buffer byte-swapped.
	 */
	__swab32s((u32 *)cmd);
	__swab32s((u32 *)cmd+1);

	t.tx_buf = cmd;
	t.len = WSPI_INIT_CMD_LEN;
	spi_message_add_tail(&t, &m);

	spi_sync(to_spi_device(glue->dev), &m);
	kfree(cmd);
}
/*
 * Build and send the WSPI init command to wake the wl1251 chip into
 * WSPI mode.  NOTE(review): the return value of spi_sync() is ignored;
 * confirm callers tolerate a silently failed wake.
 */
static void wl1251_spi_wake(struct wl1251 *wl)
{
	struct spi_transfer t;
	struct spi_message m;
	u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);

	if (!cmd) {
		wl1251_error("could not allocate cmd for spi init");
		return;
	}

	memset(&t, 0, sizeof(t));
	spi_message_init(&m);

	/* Set WSPI_INIT_COMMAND
	 * the data is being sent from the MSB to LSB
	 */
	cmd[0] = 0xff;
	cmd[1] = 0xff;
	cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
	cmd[3] = 0;
	cmd[4] = 0;
	cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
	cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;

	cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
		| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;

	if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
		cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
	else
		cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;

	/* CRC over the payload bytes (cmd[2..]), plus the end marker */
	cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;

	/*
	 * The above is the logical order; it must actually be stored
	 * in the buffer byte-swapped.
	 */
	__swab32s((u32 *)cmd);
	__swab32s((u32 *)cmd+1);

	t.tx_buf = cmd;
	t.len = WSPI_INIT_CMD_LEN;
	spi_message_add_tail(&t, &m);

	spi_sync(wl_to_spi(wl), &m);

	wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
	kfree(cmd);
}
/* Client-side completion callback for a LST ping RPC: validate the reply
 * (magic and sequence number), count errors on the session, and log the
 * round-trip time. */
static void
ping_client_done_rpc (sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
{
	sfw_test_instance_t *tsi = tsu->tsu_instance;
	sfw_session_t       *sn = tsi->tsi_batch->bat_session;
	srpc_ping_reqst_t   *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
	srpc_ping_reply_t   *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
	struct timeval       tv;

	LASSERT (sn != NULL);

	if (rpc->crpc_status != 0) {
		if (!tsi->tsi_stopping) /* rpc could have been aborted */
			atomic_inc(&sn->sn_ping_errors);
		CERROR ("Unable to ping %s (%d): %d\n",
			libcfs_id2str(rpc->crpc_dest),
			reqst->pnr_seq, rpc->crpc_status);
		return;
	}

	/* reply is in the peer's byte order; swab if it differs from ours */
	if (rpc->crpc_replymsg.msg_magic != SRPC_MSG_MAGIC) {
		__swab32s(&reply->pnr_seq);
		__swab32s(&reply->pnr_magic);
		__swab32s(&reply->pnr_status);
	}

	if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
		rpc->crpc_status = -EBADMSG;
		atomic_inc(&sn->sn_ping_errors);
		CERROR ("Bad magic %u from %s, %u expected.\n",
			reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
			LST_PING_TEST_MAGIC);
		return;
	}

	if (reply->pnr_seq != reqst->pnr_seq) {
		rpc->crpc_status = -EBADMSG;
		atomic_inc(&sn->sn_ping_errors);
		CERROR ("Bad seq %u from %s, %u expected.\n",
			reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
			reqst->pnr_seq);
		return;
	}

	cfs_fs_timeval(&tv);
	/* elapsed microseconds since the request's embedded timestamp */
	CDEBUG (D_NET, "%d reply in %u usec\n", reply->pnr_seq,
		(unsigned)((tv.tv_sec - (unsigned)reqst->pnr_time_sec) * 1000000
			   + (tv.tv_usec - reqst->pnr_time_usec)));
	return;
}
/* Client-side completion callback for a LST ping RPC: validate the reply
 * (magic and sequence number), count errors on the session, and log the
 * measured round-trip time. */
static void
ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
{
	struct sfw_test_instance *tsi = tsu->tsu_instance;
	struct sfw_session *sn = tsi->tsi_batch->bat_session;
	struct srpc_ping_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
	struct srpc_ping_reply *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
	struct timespec64 ts;

	LASSERT(sn);

	if (rpc->crpc_status) {
		if (!tsi->tsi_stopping) /* rpc could have been aborted */
			atomic_inc(&sn->sn_ping_errors);
		CERROR("Unable to ping %s (%d): %d\n",
		       libcfs_id2str(rpc->crpc_dest),
		       reqst->pnr_seq, rpc->crpc_status);
		return;
	}

	/* reply is in the peer's byte order; swab if it differs from ours */
	if (rpc->crpc_replymsg.msg_magic != SRPC_MSG_MAGIC) {
		__swab32s(&reply->pnr_seq);
		__swab32s(&reply->pnr_magic);
		__swab32s(&reply->pnr_status);
	}

	if (reply->pnr_magic != LST_PING_TEST_MAGIC) {
		rpc->crpc_status = -EBADMSG;
		atomic_inc(&sn->sn_ping_errors);
		CERROR("Bad magic %u from %s, %u expected.\n",
		       reply->pnr_magic, libcfs_id2str(rpc->crpc_dest),
		       LST_PING_TEST_MAGIC);
		return;
	}

	if (reply->pnr_seq != reqst->pnr_seq) {
		rpc->crpc_status = -EBADMSG;
		atomic_inc(&sn->sn_ping_errors);
		CERROR("Bad seq %u from %s, %u expected.\n",
		       reply->pnr_seq, libcfs_id2str(rpc->crpc_dest),
		       reqst->pnr_seq);
		return;
	}

	ktime_get_real_ts64(&ts);
	/* elapsed microseconds since the request's embedded timestamp */
	CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq,
	       (unsigned int)((ts.tv_sec - reqst->pnr_time_sec) * 1000000 +
			      (ts.tv_nsec / NSEC_PER_USEC -
			       reqst->pnr_time_usec)));
}
/* Byte-swap an llogd_conn_body: the embedded llog generation counters,
 * the log id, and the context index. */
void lustre_swab_llogd_conn_body(struct llogd_conn_body *d)
{
	__swab64s(&d->lgdc_gen.mnt_cnt);
	__swab64s(&d->lgdc_gen.conn_cnt);
	lustre_swab_llog_id(&d->lgdc_logid);
	__swab32s(&d->lgdc_ctxt_idx);
}
/**
 * Locate and sanity-check the ptlrpc bulk security descriptor stored in
 * buffer \a offset of \a msg, byte-swapping it first when \a swabbed is set.
 *
 * \retval 0       descriptor present and valid
 * \retval -EINVAL buffer missing or smaller than the descriptor
 * \retval -EPROTO unsupported version, type or bulk service
 */
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	int size = msg->lm_buflens[offset];

	bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
	if (!bsd) {
		CERROR("Invalid bulk sec desc: size %d\n", size);
		return -EINVAL;
	}

	/* Only bsd_nob is swabbed here — presumably version/type/svc are
	 * single-byte fields; verify against the struct definition. */
	if (swabbed)
		__swab32s(&bsd->bsd_nob);

	if (unlikely(bsd->bsd_version != 0)) {
		CERROR("Unexpected version %u\n", bsd->bsd_version);
		return -EPROTO;
	}

	if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
		CERROR("Invalid type %u\n", bsd->bsd_type);
		return -EPROTO;
	}

	/* FIXME more sanity check here */

	if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
		     bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
		     bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
		CERROR("Invalid svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}
/**
 * Swab and/or convert a cfg_marker that may have been written by a peer
 * using the old 32-bit time_t wire layout.
 *
 * \param marker  record to fix up in place
 * \param swab    non-zero if byte-swapping is required
 * \param size    on-wire size; sizeof(struct cfg_marker32) identifies the
 *                legacy 32-bit layout that must be converted
 */
void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
{
	struct cfg_marker32 *cm32 = (struct cfg_marker32*)marker;
	ENTRY;

	if (swab) {
		__swab32s(&marker->cm_step);
		__swab32s(&marker->cm_flags);
		__swab32s(&marker->cm_vers);
	}

	if (size == sizeof(*cm32)) {
		__u32 createtime, canceltime;

		/* There was a problem with the original declaration of
		 * cfg_marker on 32-bit systems because it used time_t as
		 * a wire protocol structure, and didn't verify this in
		 * wirecheck. We now have to convert the offsets of the
		 * later fields in order to work on 32- and 64-bit systems.
		 *
		 * Fortunately, the cm_comment field has no functional use
		 * so can be sacrificed when converting the timestamp size.
		 *
		 * Overwrite fields from the end first, so they are not
		 * clobbered, and use memmove() instead of memcpy() because
		 * the source and target buffers overlap. bug 16771 */
		createtime = cm32->cm_createtime;
		canceltime = cm32->cm_canceltime;
		memmove(marker->cm_comment, cm32->cm_comment, MTI_NAMELEN32);
		marker->cm_comment[MTI_NAMELEN32 - 1] = '\0';
		memmove(marker->cm_tgtname, cm32->cm_tgtname,
			sizeof(marker->cm_tgtname));
		if (swab) {
			__swab32s(&createtime);
			__swab32s(&canceltime);
		}
		marker->cm_createtime = createtime;
		marker->cm_canceltime = canceltime;
		CDEBUG(D_CONFIG, "Find old cfg_marker(Srv32b,Clt64b) "
		       "for target %s, converting\n",
		       marker->cm_tgtname);
	} else if (swab) {
		__swab64s(&marker->cm_createtime);
		__swab64s(&marker->cm_canceltime);
	}

	EXIT;
	return;
}
/**
 * Handle an OUT index insert update: extract the entry name, target FID
 * and record type from the update parameters, validate them, and queue
 * the insert transaction.
 *
 * \retval 0 or positive on success, negative errno on failure.
 */
static int out_index_insert(struct tgt_session_info *tsi)
{
	struct tgt_thread_info	*tti = tgt_th_info(tsi->tsi_env);
	struct object_update	*update = tti->tti_u.update.tti_update;
	struct dt_object	*obj = tti->tti_u.update.tti_dt_object;
	struct dt_insert_rec	*rec = &tti->tti_rec;
	struct lu_fid		*fid;
	char			*name;
	__u32			*ptype;
	int			 rc = 0;
	size_t			 size;
	ENTRY;

	/* parameter 0: name of the entry to insert */
	name = object_update_param_get(update, 0, NULL);
	if (IS_ERR(name)) {
		CERROR("%s: empty name for index insert: rc = %ld\n",
		       tgt_name(tsi->tsi_tgt), PTR_ERR(name));
		RETURN(PTR_ERR(name));
	}

	/* parameter 1: FID the new entry points at */
	fid = object_update_param_get(update, 1, &size);
	if (IS_ERR(fid) || size != sizeof(*fid)) {
		CERROR("%s: invalid fid: rc = %ld\n",
		       tgt_name(tsi->tsi_tgt), PTR_ERR(fid));
		RETURN(PTR_ERR(fid));
	}

	if (ptlrpc_req_need_swab(tsi->tsi_pill->rc_req))
		lustre_swab_lu_fid(fid);

	if (!fid_is_sane(fid)) {
		CERROR("%s: invalid FID "DFID": rc = %d\n",
		       tgt_name(tsi->tsi_tgt), PFID(fid), -EPROTO);
		RETURN(-EPROTO);
	}

	/* parameter 2: record type for the new entry */
	ptype = object_update_param_get(update, 2, &size);
	if (IS_ERR(ptype) || size != sizeof(*ptype)) {
		CERROR("%s: invalid type for index insert: rc = %ld\n",
		       tgt_name(tsi->tsi_tgt), PTR_ERR(ptype));
		RETURN(PTR_ERR(ptype));
	}

	if (ptlrpc_req_need_swab(tsi->tsi_pill->rc_req))
		__swab32s(ptype);

	rec->rec_fid = fid;
	rec->rec_type = *ptype;

	rc = out_tx_index_insert(tsi->tsi_env, obj,
				 (const struct dt_rec *)rec,
				 (const struct dt_key *)name, &tti->tti_tea,
				 tti->tti_tea.ta_handle,
				 tti->tti_u.update.tti_update_reply,
				 tti->tti_u.update.tti_update_reply_index);

	RETURN(rc);
}
/* Server-side handler for a LST ping RPC: swab the request if the peer
 * has opposite byte order, validate its magic, and echo the sequence
 * number back in the reply. */
static int ping_server_handle(struct srpc_server_rpc *rpc)
{
	struct srpc_service	*sv = rpc->srpc_scd->scd_svc;
	srpc_msg_t		*reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
	srpc_msg_t		*replymsg = &rpc->srpc_replymsg;
	srpc_ping_reqst_t	*req = &reqstmsg->msg_body.ping_reqst;
	srpc_ping_reply_t	*rep = &rpc->srpc_replymsg.msg_body.ping_reply;

	LASSERT (sv->sv_id == SRPC_SERVICE_PING);

	if (reqstmsg->msg_magic != SRPC_MSG_MAGIC) {
		/* peer is opposite-endian: magic must be the swabbed form */
		LASSERT (reqstmsg->msg_magic == __swab32(SRPC_MSG_MAGIC));

		__swab32s(&req->pnr_seq);
		__swab32s(&req->pnr_magic);
		__swab64s(&req->pnr_time_sec);
		__swab64s(&req->pnr_time_usec);
	}
	LASSERT (reqstmsg->msg_type == srpc_service2request(sv->sv_id));

	if (req->pnr_magic != LST_PING_TEST_MAGIC) {
		CERROR ("Unexpected magic %08x from %s\n",
			req->pnr_magic, libcfs_id2str(rpc->srpc_peer));
		return -EINVAL;
	}

	rep->pnr_seq = req->pnr_seq;
	rep->pnr_magic = LST_PING_TEST_MAGIC;

	/* unknown session features: report EPROTO in the reply payload
	 * (positive status carried in-band, not an RPC-level failure) */
	if ((reqstmsg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
		replymsg->msg_ses_feats = LST_FEATS_MASK;
		rep->pnr_status = EPROTO;
		return 0;
	}

	replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;

	CDEBUG(D_NET, "Get ping %d from %s\n",
	       req->pnr_seq, libcfs_id2str(rpc->srpc_peer));
	return 0;
}
/** * Byte-swap the fields of struct lov_user_md. * * XXX Rather than duplicating swabbing code here, we should eventually * refactor the needed functions in lustre/ptlrpc/pack_generic.c * into a library that can be shared between kernel and user code. */ static void llapi_layout_swab_lov_user_md(struct lov_user_md *lum, int object_count) { int i; struct lov_user_md_v3 *lumv3 = (struct lov_user_md_v3 *)lum; struct lov_user_ost_data *lod; __swab32s(&lum->lmm_magic); __swab32s(&lum->lmm_pattern); __swab32s(&lum->lmm_stripe_size); __swab16s(&lum->lmm_stripe_count); __swab16s(&lum->lmm_stripe_offset); if (lum->lmm_magic != LOV_MAGIC_V1) lod = lumv3->lmm_objects; else lod = lum->lmm_objects; for (i = 0; i < object_count; i++) __swab32s(&lod[i].l_ost_idx); }
void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg) { int i; ENTRY; __swab32s(&lcfg->lcfg_version); if (lcfg->lcfg_version != LUSTRE_CFG_VERSION) { CERROR("not swabbing lustre_cfg version %#x (expecting %#x)\n", lcfg->lcfg_version, LUSTRE_CFG_VERSION); EXIT; return; } __swab32s(&lcfg->lcfg_command); __swab32s(&lcfg->lcfg_num); __swab32s(&lcfg->lcfg_flags); __swab64s(&lcfg->lcfg_nid); __swab32s(&lcfg->lcfg_bufcount); for (i = 0; i < lcfg->lcfg_bufcount && i < LUSTRE_CFG_MAX_BUFCOUNT; i++) __swab32s(&lcfg->lcfg_buflens[i]); print_lustre_cfg(lcfg); EXIT; return; }
/**
 * Swab, if needed, LOA (for OST-object only) structure with LMA EA and PFID EA
 * combined together are stored on-disk in little-endian order.
 *
 * \param[in] loa - the pointer to the LOA structure to be swabbed.
 * \param[in] to_cpu - to indicate swab for CPU order or not.
 */
void lustre_loa_swab(struct lustre_ost_attrs *loa, bool to_cpu)
{
	struct lustre_mdt_attrs *lma = &loa->loa_lma;
#ifdef __BIG_ENDIAN
	/* capture lma_compat before lustre_lma_swab() changes its byte
	 * order; the flag bits decide which optional fields to swab */
	__u32 compat = lma->lma_compat;
#endif

	lustre_lma_swab(lma);
#ifdef __BIG_ENDIAN
	/* converting to CPU order: flags are only valid after the swab
	 * above; converting to disk order: they were valid before it */
	if (to_cpu)
		compat = lma->lma_compat;

	if (compat & LMAC_STRIPE_INFO) {
		lustre_swab_lu_fid(&loa->loa_parent_fid);
		__swab32s(&loa->loa_stripe_size);
	}
	if (compat & LMAC_COMP_INFO) {
		__swab32s(&loa->loa_comp_id);
		__swab64s(&loa->loa_comp_start);
		__swab64s(&loa->loa_comp_end);
	}
#endif
}
/**
 * Handle an OUT xattr set update (legacy variant): extract the xattr
 * name, value buffer and flag from the update parameters and queue the
 * set transaction.
 *
 * \retval 0 or positive on success, negative errno (via err_serious)
 *	   on malformed input.
 */
static int out_xattr_set(struct tgt_session_info *tsi)
{
	struct tgt_thread_info	*tti = tgt_th_info(tsi->tsi_env);
	struct object_update	*update = tti->tti_u.update.tti_update;
	struct dt_object	*obj = tti->tti_u.update.tti_dt_object;
	struct lu_buf		*lbuf = &tti->tti_buf;
	char			*name;
	char			*buf;
	char			*tmp;
	/* NOTE(review): buf_len is int here; confirm that
	 * object_update_param_get() expects an int * for its size
	 * out-parameter in this tree. */
	int			 buf_len = 0;
	int			 flag;
	int			 rc;
	ENTRY;

	/* parameter 0: xattr name */
	name = object_update_param_get(update, 0, NULL);
	if (name == NULL) {
		CERROR("%s: empty name for xattr set: rc = %d\n",
		       tgt_name(tsi->tsi_tgt), -EPROTO);
		RETURN(err_serious(-EPROTO));
	}

	/* parameter 1: xattr value buffer */
	buf = object_update_param_get(update, 1, &buf_len);
	if (buf == NULL || buf_len == 0) {
		CERROR("%s: empty buf for xattr set: rc = %d\n",
		       tgt_name(tsi->tsi_tgt), -EPROTO);
		RETURN(err_serious(-EPROTO));
	}

	lbuf->lb_buf = buf;
	lbuf->lb_len = buf_len;

	/* parameter 2: 32-bit flag, swabbed below if the request needs it */
	tmp = (char *)object_update_param_get(update, 2, NULL);
	if (tmp == NULL) {
		CERROR("%s: empty flag for xattr set: rc = %d\n",
		       tgt_name(tsi->tsi_tgt), -EPROTO);
		RETURN(err_serious(-EPROTO));
	}

	if (ptlrpc_req_need_swab(tsi->tsi_pill->rc_req))
		__swab32s((__u32 *)tmp);
	flag = *(int *)tmp;

	rc = out_tx_xattr_set(tsi->tsi_env, obj, lbuf, name, flag,
			      &tti->tti_tea,
			      tti->tti_u.update.tti_update_reply,
			      tti->tti_u.update.tti_update_reply_index);
	RETURN(rc);
}
static int out_xattr_set(struct tgt_session_info *tsi) { struct tgt_thread_info *tti = tgt_th_info(tsi->tsi_env); struct object_update *update = tti->tti_u.update.tti_update; struct dt_object *obj = tti->tti_u.update.tti_dt_object; struct lu_buf *lbuf = &tti->tti_buf; char *name; char *buf; __u32 *tmp; size_t buf_len = 0; int flag; size_t size = 0; int rc; ENTRY; name = object_update_param_get(update, 0, NULL); if (IS_ERR(name)) { CERROR("%s: empty name for xattr set: rc = %ld\n", tgt_name(tsi->tsi_tgt), PTR_ERR(name)); RETURN(PTR_ERR(name)); } /* If buffer == NULL (-ENODATA), then it might mean delete xattr */ buf = object_update_param_get(update, 1, &buf_len); if (IS_ERR(buf) && PTR_ERR(buf) != -ENODATA) RETURN(PTR_ERR(buf)); lbuf->lb_buf = buf; lbuf->lb_len = buf_len; tmp = object_update_param_get(update, 2, &size); if (IS_ERR(tmp) || size != sizeof(*tmp)) { CERROR("%s: emptry or wrong size %zu flag: rc = %ld\n", tgt_name(tsi->tsi_tgt), size, PTR_ERR(tmp)); RETURN(PTR_ERR(tmp)); } if (ptlrpc_req_need_swab(tsi->tsi_pill->rc_req)) __swab32s(tmp); flag = *tmp; rc = out_tx_xattr_set(tsi->tsi_env, obj, lbuf, name, flag, &tti->tti_tea, tti->tti_tea.ta_handle, tti->tti_u.update.tti_update_reply, tti->tti_u.update.tti_update_reply_index); RETURN(rc); }
/* Byte-swap a llog_logid: the object id/sequence pair and generation. */
void lustre_swab_llog_id(struct llog_logid *log_id)
{
	__swab64s(&log_id->lgl_oi.oi.oi_id);
	__swab64s(&log_id->lgl_oi.oi.oi_seq);
	__swab32s(&log_id->lgl_ogen);
}
/* Byte-swap a lu_fid: sequence, object id, and version. */
void lustre_swab_lu_fid(struct lu_fid *fid)
{
	__swab64s(&fid->f_seq);
	__swab32s(&fid->f_oid);
	__swab32s(&fid->f_ver);
}
/* Byte-swap an llog record in place according to its (just swabbed) type.
 * The record header is swabbed unconditionally; the per-type payload and
 * the trailing llog_rec_tail are swabbed when the type is recognised. */
void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
{
	struct llog_rec_tail *tail = NULL;

	__swab32s(&rec->lrh_len);
	__swab32s(&rec->lrh_index);
	__swab32s(&rec->lrh_type);
	__swab32s(&rec->lrh_id);

	switch (rec->lrh_type) {
	case OST_SZ_REC: {
		struct llog_size_change_rec *lsc =
			(struct llog_size_change_rec *)rec;

		lustre_swab_ll_fid(&lsc->lsc_fid);
		__swab32s(&lsc->lsc_ioepoch);
		tail = &lsc->lsc_tail;
		break;
	}
	case MDS_UNLINK_REC: {
		struct llog_unlink_rec *lur = (struct llog_unlink_rec *)rec;

		__swab64s(&lur->lur_oid);
		__swab32s(&lur->lur_oseq);
		__swab32s(&lur->lur_count);
		tail = &lur->lur_tail;
		break;
	}
	case MDS_UNLINK64_REC: {
		struct llog_unlink64_rec *lur =
			(struct llog_unlink64_rec *)rec;

		lustre_swab_lu_fid(&lur->lur_fid);
		__swab32s(&lur->lur_count);
		tail = &lur->lur_tail;
		break;
	}
	case CHANGELOG_REC: {
		struct llog_changelog_rec *cr =
			(struct llog_changelog_rec*)rec;

		__swab16s(&cr->cr.cr_namelen);
		__swab16s(&cr->cr.cr_flags);
		__swab32s(&cr->cr.cr_type);
		__swab64s(&cr->cr.cr_index);
		__swab64s(&cr->cr.cr_prev);
		__swab64s(&cr->cr.cr_time);
		lustre_swab_lu_fid(&cr->cr.cr_tfid);
		lustre_swab_lu_fid(&cr->cr.cr_pfid);
		/* extended records carry two extra FIDs and a different
		 * tail location */
		if (CHANGELOG_REC_EXTENDED(&cr->cr)) {
			struct llog_changelog_ext_rec *ext =
				(struct llog_changelog_ext_rec *)rec;

			lustre_swab_lu_fid(&ext->cr.cr_sfid);
			lustre_swab_lu_fid(&ext->cr.cr_spfid);
			tail = &ext->cr_tail;
		} else {
			tail = &cr->cr_tail;
		}
		break;
	}
	case CHANGELOG_USER_REC: {
		struct llog_changelog_user_rec *cur =
			(struct llog_changelog_user_rec*)rec;

		__swab32s(&cur->cur_id);
		__swab64s(&cur->cur_endrec);
		tail = &cur->cur_tail;
		break;
	}
	case HSM_AGENT_REC: {
		struct llog_agent_req_rec *arr =
			(struct llog_agent_req_rec *)rec;

		__swab32s(&arr->arr_hai.hai_len);
		__swab32s(&arr->arr_hai.hai_action);
		lustre_swab_lu_fid(&arr->arr_hai.hai_fid);
		lustre_swab_lu_fid(&arr->arr_hai.hai_dfid);
		__swab64s(&arr->arr_hai.hai_cookie);
		__swab64s(&arr->arr_hai.hai_extent.offset);
		__swab64s(&arr->arr_hai.hai_extent.length);
		__swab64s(&arr->arr_hai.hai_gid);
		/* no swabbing for opaque data */
		/* hai_data[0]; */
		break;
	}
	case MDS_SETATTR64_REC: {
		struct llog_setattr64_rec *lsr =
			(struct llog_setattr64_rec *)rec;

		lustre_swab_ost_id(&lsr->lsr_oi);
		__swab32s(&lsr->lsr_uid);
		__swab32s(&lsr->lsr_uid_h);
		__swab32s(&lsr->lsr_gid);
		__swab32s(&lsr->lsr_gid_h);
		tail = &lsr->lsr_tail;
		break;
	}
	case OBD_CFG_REC:
		/* these are swabbed as they are consumed */
		break;
	case LLOG_HDR_MAGIC: {
		struct llog_log_hdr *llh = (struct llog_log_hdr *)rec;

		__swab64s(&llh->llh_timestamp);
		__swab32s(&llh->llh_count);
		__swab32s(&llh->llh_bitmap_offset);
		__swab32s(&llh->llh_flags);
		__swab32s(&llh->llh_size);
		__swab32s(&llh->llh_cat_idx);
		tail = &llh->llh_tail;
		break;
	}
	case LLOG_LOGID_MAGIC: {
		struct llog_logid_rec *lid = (struct llog_logid_rec *)rec;

		lustre_swab_llog_id(&lid->lid_id);
		tail = &lid->lid_tail;
		break;
	}
	case LLOG_GEN_REC: {
		struct llog_gen_rec *lgr = (struct llog_gen_rec *)rec;

		__swab64s(&lgr->lgr_gen.mnt_cnt);
		__swab64s(&lgr->lgr_gen.conn_cnt);
		tail = &lgr->lgr_tail;
		break;
	}
	case LLOG_PAD_MAGIC:
		break;
	default:
		CERROR("Unknown llog rec type %#x swabbing rec %p\n",
		       rec->lrh_type, rec);
	}

	if (tail) {
		__swab32s(&tail->lrt_len);
		__swab32s(&tail->lrt_index);
	}
}
/*
 * Build and send the WSPI init command to switch the wl12xx chip into
 * WSPI mode, then clock out extra cycles with CS inverted as the wilink
 * family requires.  NOTE(review): spi_sync() return values are ignored;
 * confirm callers tolerate a silently failed init.
 */
static void wl12xx_spi_init(struct device *child)
{
	struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
	struct spi_transfer t;
	struct spi_message m;
	struct spi_device *spi = to_spi_device(glue->dev);
	u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);

	if (!cmd) {
		dev_err(child->parent,
			"could not allocate cmd for spi init\n");
		return;
	}

	memset(&t, 0, sizeof(t));
	spi_message_init(&m);

	/*
	 * Set WSPI_INIT_COMMAND
	 * the data is being sent from the MSB to LSB
	 */
	cmd[0] = 0xff;
	cmd[1] = 0xff;
	cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
	cmd[3] = 0;
	cmd[4] = 0;
	cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
	cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;

	cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
		| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;

	if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
		cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
	else
		cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;

	/* CRC over the payload bytes (cmd[2..]), plus the end marker */
	cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;

	/*
	 * The above is the logical order; it must actually be stored
	 * in the buffer byte-swapped.
	 */
	__swab32s((u32 *)cmd);
	__swab32s((u32 *)cmd+1);

	t.tx_buf = cmd;
	t.len = WSPI_INIT_CMD_LEN;
	spi_message_add_tail(&t, &m);

	spi_sync(to_spi_device(glue->dev), &m);

	/* Send extra clocks with inverted CS (high). this is required
	 * by the wilink family in order to successfully enter WSPI mode.
	 */
	spi->mode ^= SPI_CS_HIGH;
	memset(&m, 0, sizeof(m));
	spi_message_init(&m);

	cmd[0] = 0xff;
	cmd[1] = 0xff;
	cmd[2] = 0xff;
	cmd[3] = 0xff;
	__swab32s((u32 *)cmd);
	t.tx_buf = cmd;
	t.len = 4;
	spi_message_add_tail(&t, &m);
	spi_sync(to_spi_device(glue->dev), &m);

	/* Restore chip select configuration to normal */
	spi->mode ^= SPI_CS_HIGH;
	kfree(cmd);
}
/* Switch on rx_state.
 * Return 0 on success, 1 if whole packet is read, else return <0
 * Always set cont_flag: 1 if we're ready to continue reading, else 0
 * NB: If whole packet is read, cont_flag will be set to zero to take
 * care of fairness */
int usocklnd_read_msg(usock_conn_t *conn, int *cont_flag)
{
	int   rc = 0;
	__u64 cookie;

	*cont_flag = 0;

	/* smth. new emerged in RX part - let's process it */
	switch (conn->uc_rx_state) {
	case UC_RX_KSM_HEADER:
		/* header is in peer byte order if the connection is flipped */
		if (conn->uc_flip) {
			__swab32s(&conn->uc_rx_msg.ksm_type);
			__swab32s(&conn->uc_rx_msg.ksm_csum);
			__swab64s(&conn->uc_rx_msg.ksm_zc_cookies[0]);
			__swab64s(&conn->uc_rx_msg.ksm_zc_cookies[1]);
		}

		/* we never send packets for which zc-acking is required */
		if (conn->uc_rx_msg.ksm_type != KSOCK_MSG_LNET ||
		    conn->uc_rx_msg.ksm_zc_cookies[1] != 0) {
			conn->uc_errored = 1;
			return -EPROTO;
		}

		/* zc_req will be processed later, when lnet payload
		 * will be received */
		usocklnd_rx_lnethdr_state_transition(conn);
		*cont_flag = 1;
		break;

	case UC_RX_LNET_HEADER:
		if (the_lnet.ln_pid & LNET_PID_USERFLAG) {
			/* replace dest_nid,pid (ksocknal sets its own) */
			conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr.dest_nid =
				cpu_to_le64(conn->uc_peer->up_ni->ni_nid);
			conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr.dest_pid =
				cpu_to_le32(the_lnet.ln_pid);

		} else if (conn->uc_peer->up_peerid.pid & LNET_PID_USERFLAG) {
			/* Userspace peer */
			lnet_process_id_t *id = &conn->uc_peer->up_peerid;
			lnet_hdr_t *lhdr =
				&conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr;

			/* Substitute process ID assigned at connection time */
			lhdr->src_pid = cpu_to_le32(id->pid);
			lhdr->src_nid = cpu_to_le64(id->nid);
		}

		conn->uc_rx_state = UC_RX_PARSE;
		usocklnd_conn_addref(conn); /* ++ref while parsing */

		rc = lnet_parse(conn->uc_peer->up_ni,
				&conn->uc_rx_msg.ksm_u.lnetmsg.ksnm_hdr,
				conn->uc_peerid.nid, conn, 0);

		if (rc < 0) {
			/* I just received garbage: give up on this conn */
			conn->uc_errored = 1;
			usocklnd_conn_decref(conn);
			return -EPROTO;
		}

		/* Race with usocklnd_recv() is possible */
		pthread_mutex_lock(&conn->uc_lock);
		LASSERT (conn->uc_rx_state == UC_RX_PARSE ||
			 conn->uc_rx_state == UC_RX_LNET_PAYLOAD);

		/* check whether usocklnd_recv() got called */
		if (conn->uc_rx_state == UC_RX_LNET_PAYLOAD)
			*cont_flag = 1;
		pthread_mutex_unlock(&conn->uc_lock);
		break;

	case UC_RX_PARSE:
		LBUG(); /* it's error to be here, because this special
			 * case is handled by caller */
		break;

	case UC_RX_PARSE_WAIT:
		LBUG(); /* it's error to be here, because the conn
			 * shouldn't wait for POLLIN event in this
			 * state */
		break;

	case UC_RX_LNET_PAYLOAD:
		/* payload all received */

		lnet_finalize(conn->uc_peer->up_ni, conn->uc_rx_lnetmsg, 0);

		cookie = conn->uc_rx_msg.ksm_zc_cookies[0];
		if (cookie != 0)
			rc = usocklnd_handle_zc_req(conn->uc_peer, cookie);

		if (rc != 0) {
			/* change state not to finalize twice */
			conn->uc_rx_state = UC_RX_KSM_HEADER;
			return -EPROTO;
		}

		/* Fall through */

	case UC_RX_SKIPPING:
		if (conn->uc_rx_nob_left != 0) {
			usocklnd_rx_skipping_state_transition(conn);
			*cont_flag = 1;
		} else {
			usocklnd_rx_ksmhdr_state_transition(conn);
			rc = 1; /* whole packet is read */
		}
		break;

	default:
		LBUG(); /* unknown state */
	}

	return rc;
}
/* Switch on rx_state.
 * Return 0 on success, else return <0
 * Always set cont_flag: 1 if we're ready to continue reading, else 0
 */
int usocklnd_read_hello(usock_conn_t *conn, int *cont_flag)
{
	int rc = 0;
	ksock_hello_msg_t *hello = conn->uc_rx_hello;

	*cont_flag = 0;

	/* smth. new emerged in hello - let's process it */
	switch (conn->uc_rx_state) {
	case UC_RX_HELLO_MAGIC:
		if (hello->kshm_magic == LNET_PROTO_MAGIC)
			conn->uc_flip = 0;
		else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
			conn->uc_flip = 1;
		else
			return -EPROTO;

		usocklnd_rx_helloversion_state_transition(conn);
		*cont_flag = 1;
		break;

	case UC_RX_HELLO_VERSION:
		if ((!conn->uc_flip &&
		     (hello->kshm_version != KSOCK_PROTO_V2)) ||
		    (conn->uc_flip &&
		     (hello->kshm_version != __swab32(KSOCK_PROTO_V2))))
			return -EPROTO;

		usocklnd_rx_hellobody_state_transition(conn);
		*cont_flag = 1;
		break;

	case UC_RX_HELLO_BODY:
		if (conn->uc_flip) {
			/* Fix: dropped the inner re-declaration of 'hello'
			 * that shadowed the identical function-scope variable
			 * (-Wshadow); both pointed at conn->uc_rx_hello. */
			__swab32s(&hello->kshm_src_pid);
			__swab64s(&hello->kshm_src_nid);
			__swab32s(&hello->kshm_dst_pid);
			__swab64s(&hello->kshm_dst_nid);
			__swab64s(&hello->kshm_src_incarnation);
			__swab64s(&hello->kshm_dst_incarnation);
			__swab32s(&hello->kshm_ctype);
			__swab32s(&hello->kshm_nips);
		}

		if (conn->uc_rx_hello->kshm_nips > LNET_MAX_INTERFACES) {
			CERROR("Bad nips %d from ip %u.%u.%u.%u port %d\n",
			       conn->uc_rx_hello->kshm_nips,
			       HIPQUAD(conn->uc_peer_ip), conn->uc_peer_port);
			return -EPROTO;
		}

		if (conn->uc_rx_hello->kshm_nips) {
			usocklnd_rx_helloIPs_state_transition(conn);
			*cont_flag = 1;
			break;
		}
		/* fall through */

	case UC_RX_HELLO_IPS:
		if (conn->uc_activeflag == 1) /* active conn */
			rc = usocklnd_activeconn_hellorecv(conn);
		else                          /* passive conn */
			rc = usocklnd_passiveconn_hellorecv(conn);
		break;

	default:
		LBUG(); /* unknown state */
	}

	return rc;
}
/**
 * Complete the acceptor handshake on a freshly-accepted socket whose
 * first 32 bits (\a magic) have already been read.
 *
 * \retval 0        connection handed off to the matching NI's LND
 * \retval -EPROTO  protocol/version mismatch (a version reply may be sent)
 * \retval -EIO     short read from the peer
 * \retval -EPERM   no local NI matches the requested NID
 */
int lnet_accept(socket_t *sock, __u32 magic)
{
	lnet_acceptor_connreq_t cr;
	__u32 peer_ip;
	int peer_port;
	int rc;
	int flip;
	lnet_ni_t *ni;
	char *str;

	LASSERT(sizeof(cr) <= 16);	/* not too big for the stack */

	rc = libcfs_sock_getaddr(sock, 1, &peer_ip, &peer_port);
	LASSERT(rc == 0);		/* we succeeded before */

	if (!lnet_accept_magic(magic, LNET_PROTO_ACCEPTOR_MAGIC)) {
		if (lnet_accept_magic(magic, LNET_PROTO_MAGIC)) {
			/* future version compatibility!
			 * When LNET unifies protocols over all LNDs, the first
			 * thing sent will be a version query. I send back
			 * LNET_PROTO_ACCEPTOR_MAGIC to tell her I'm "old" */
			memset(&cr, 0, sizeof(cr));
			cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
			cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;
			rc = libcfs_sock_write(sock, &cr, sizeof(cr),
					       accept_timeout);
			if (rc != 0)
				CERROR("Error sending magic+version in response to LNET magic from %pI4h: %d\n",
				       &peer_ip, rc);
			return -EPROTO;
		}

		if (magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC))
			str = "'old' socknal/tcpnal";
		else if (lnet_accept_magic(magic, LNET_PROTO_RA_MAGIC))
			str = "'old' ranal";
		else
			str = "unrecognised";

		LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h magic %08x: %s acceptor protocol\n",
				   &peer_ip, magic, str);
		return -EPROTO;
	}

	/* byte-swapped acceptor magic means the peer has opposite order */
	flip = (magic != LNET_PROTO_ACCEPTOR_MAGIC);

	rc = libcfs_sock_read(sock, &cr.acr_version, sizeof(cr.acr_version),
			      accept_timeout);
	if (rc != 0) {
		CERROR("Error %d reading connection request version from %pI4h\n",
		       rc, &peer_ip);
		return -EIO;
	}

	if (flip)
		__swab32s(&cr.acr_version);

	if (cr.acr_version != LNET_PROTO_ACCEPTOR_VERSION) {
		/* future version compatibility!
		 * An acceptor-specific protocol rev will first send a version
		 * query. I send back my current version to tell her I'm
		 * "old". */
		int peer_version = cr.acr_version;

		memset(&cr, 0, sizeof(cr));
		cr.acr_magic = LNET_PROTO_ACCEPTOR_MAGIC;
		cr.acr_version = LNET_PROTO_ACCEPTOR_VERSION;

		rc = libcfs_sock_write(sock, &cr, sizeof(cr), accept_timeout);
		if (rc != 0)
			CERROR("Error sending magic+version in response to version %d from %pI4h: %d\n",
			       peer_version, &peer_ip, rc);
		return -EPROTO;
	}

	rc = libcfs_sock_read(sock, &cr.acr_nid,
			      sizeof(cr) -
			      offsetof(lnet_acceptor_connreq_t, acr_nid),
			      accept_timeout);
	if (rc != 0) {
		CERROR("Error %d reading connection request from %pI4h\n",
		       rc, &peer_ip);
		return -EIO;
	}

	if (flip)
		__swab64s(&cr.acr_nid);

	ni = lnet_net2ni(LNET_NIDNET(cr.acr_nid));
	if (ni == NULL ||		/* no matching net */
	    ni->ni_nid != cr.acr_nid) {	/* right NET, wrong NID! */
		if (ni != NULL)
			lnet_ni_decref(ni);
		LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h for %s: No matching NI\n",
				   &peer_ip, libcfs_nid2str(cr.acr_nid));
		return -EPERM;
	}

	if (ni->ni_lnd->lnd_accept == NULL) {
		/* This catches a request for the loopback LND */
		lnet_ni_decref(ni);
		/* fixed user-visible message typo: "doesn not" */
		LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h for %s: NI does not accept IP connections\n",
				   &peer_ip, libcfs_nid2str(cr.acr_nid));
		return -EPERM;
	}

	CDEBUG(D_NET, "Accept %s from %pI4h\n",
	       libcfs_nid2str(cr.acr_nid), &peer_ip);

	rc = ni->ni_lnd->lnd_accept(ni, sock);

	lnet_ni_decref(ni);
	return rc;
}
/* Byte-swap an ll_fid: object id, generation, and type. */
void lustre_swab_ll_fid(struct ll_fid *fid)
{
	__swab64s(&fid->id);
	__swab32s(&fid->generation);
	__swab32s(&fid->f_type);
}
/* Byte-swap an llog record in place according to its (just swabbed) type.
 * The record header is swabbed unconditionally; the per-type payload and
 * the trailing llog_rec_tail are swabbed when the type is recognised.
 * For variable-length records (changelog, update) the tail location is
 * computed at runtime. */
void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
{
	struct llog_rec_tail *tail = NULL;

	__swab32s(&rec->lrh_len);
	__swab32s(&rec->lrh_index);
	__swab32s(&rec->lrh_type);
	__swab32s(&rec->lrh_id);

	switch (rec->lrh_type) {
	case OST_SZ_REC: {
		struct llog_size_change_rec *lsc =
			(struct llog_size_change_rec *)rec;

		lustre_swab_ll_fid(&lsc->lsc_fid);
		__swab32s(&lsc->lsc_ioepoch);
		tail = &lsc->lsc_tail;
		break;
	}
	case MDS_UNLINK_REC: {
		struct llog_unlink_rec *lur = (struct llog_unlink_rec *)rec;

		__swab64s(&lur->lur_oid);
		__swab32s(&lur->lur_oseq);
		__swab32s(&lur->lur_count);
		tail = &lur->lur_tail;
		break;
	}
	case MDS_UNLINK64_REC: {
		struct llog_unlink64_rec *lur =
			(struct llog_unlink64_rec *)rec;

		lustre_swab_lu_fid(&lur->lur_fid);
		__swab32s(&lur->lur_count);
		tail = &lur->lur_tail;
		break;
	}
	case CHANGELOG_REC: {
		struct llog_changelog_rec *cr =
			(struct llog_changelog_rec *)rec;

		__swab16s(&cr->cr.cr_namelen);
		__swab16s(&cr->cr.cr_flags);
		__swab32s(&cr->cr.cr_type);
		__swab64s(&cr->cr.cr_index);
		__swab64s(&cr->cr.cr_prev);
		__swab64s(&cr->cr.cr_time);
		lustre_swab_lu_fid(&cr->cr.cr_tfid);
		lustre_swab_lu_fid(&cr->cr.cr_pfid);
		if (cr->cr.cr_flags & CLF_RENAME) {
			struct changelog_ext_rename *rnm =
				changelog_rec_rename(&cr->cr);

			lustre_swab_lu_fid(&rnm->cr_sfid);
			lustre_swab_lu_fid(&rnm->cr_spfid);
		}
		/* Because the tail follows a variable-length structure we need
		 * to compute its location at runtime */
		tail = (struct llog_rec_tail *)((char *)&cr->cr +
						changelog_rec_size(&cr->cr) +
						cr->cr.cr_namelen);
		break;
	}
	case CHANGELOG_USER_REC: {
		struct llog_changelog_user_rec *cur =
			(struct llog_changelog_user_rec*)rec;

		__swab32s(&cur->cur_id);
		__swab64s(&cur->cur_endrec);
		tail = &cur->cur_tail;
		break;
	}
	case HSM_AGENT_REC: {
		struct llog_agent_req_rec *arr =
			(struct llog_agent_req_rec *)rec;

		__swab32s(&arr->arr_hai.hai_len);
		__swab32s(&arr->arr_hai.hai_action);
		lustre_swab_lu_fid(&arr->arr_hai.hai_fid);
		lustre_swab_lu_fid(&arr->arr_hai.hai_dfid);
		__swab64s(&arr->arr_hai.hai_cookie);
		__swab64s(&arr->arr_hai.hai_extent.offset);
		__swab64s(&arr->arr_hai.hai_extent.length);
		__swab64s(&arr->arr_hai.hai_gid);
		/* no swabbing for opaque data */
		/* hai_data[0]; */
		break;
	}
	case MDS_SETATTR64_REC: {
		struct llog_setattr64_rec *lsr =
			(struct llog_setattr64_rec *)rec;

		lustre_swab_ost_id(&lsr->lsr_oi);
		__swab32s(&lsr->lsr_uid);
		__swab32s(&lsr->lsr_uid_h);
		__swab32s(&lsr->lsr_gid);
		__swab32s(&lsr->lsr_gid_h);
		__swab64s(&lsr->lsr_valid);
		tail = &lsr->lsr_tail;
		break;
	}
	case OBD_CFG_REC:
		/* these are swabbed as they are consumed */
		break;
	case LLOG_HDR_MAGIC: {
		struct llog_log_hdr *llh = (struct llog_log_hdr *)rec;

		__swab64s(&llh->llh_timestamp);
		__swab32s(&llh->llh_count);
		__swab32s(&llh->llh_bitmap_offset);
		__swab32s(&llh->llh_flags);
		__swab32s(&llh->llh_size);
		__swab32s(&llh->llh_cat_idx);
		tail = LLOG_HDR_TAIL(llh);
		break;
	}
	case LLOG_LOGID_MAGIC: {
		struct llog_logid_rec *lid = (struct llog_logid_rec *)rec;

		lustre_swab_llog_id(&lid->lid_id);
		tail = &lid->lid_tail;
		break;
	}
	case LLOG_GEN_REC: {
		struct llog_gen_rec *lgr = (struct llog_gen_rec *)rec;

		__swab64s(&lgr->lgr_gen.mnt_cnt);
		__swab64s(&lgr->lgr_gen.conn_cnt);
		tail = &lgr->lgr_tail;
		break;
	}
	case LLOG_PAD_MAGIC:
		break;
	case UPDATE_REC: {
		struct llog_update_record *lur =
			(struct llog_update_record *)rec;
		struct update_records *record = &lur->lur_update_rec;

		__swab32s(&record->ur_flags);
		__swab64s(&record->ur_batchid);
		__swab64s(&record->ur_master_transno);
		__swab32s(&record->ur_param_count);
		__swab32s(&record->ur_update_count);
		lustre_swab_update_ops(&record->ur_ops,
				       record->ur_update_count);

		/* Compute tail location. */
		tail = (struct llog_rec_tail *)((char *)record +
						update_records_size(record));
		break;
	}
	default:
		CERROR("Unknown llog rec type %#x swabbing rec %p\n",
		       rec->lrh_type, rec);
	}

	if (tail) {
		__swab32s(&tail->lrt_len);
		__swab32s(&tail->lrt_index);
	}
}