Example #1
int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
		 struct ib_send_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	unsigned long lock_flags;
	int err = 0;

	u32 flags;
	u32 tot_len;
	u8 actual_sge_count;
	u32 msg_size;

	if (qp->state > IB_QPS_RTS)
		return -EINVAL;

	while (ib_wr) {

		flags = 0;
		wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		if (ib_wr->send_flags & IB_SEND_SIGNALED) {
			flags |= SQ_SIGNALED;
		}

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
			if (ib_wr->send_flags & IB_SEND_SOLICITED) {
				c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
				msg_size = sizeof(struct c2wr_send_req);
			} else {
				c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
				msg_size = sizeof(struct c2wr_send_req);
			}

			wr.sqwr.send.remote_stag = 0;
			msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
			if (ib_wr->num_sge > qp->send_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_WRITE:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
			msg_size = sizeof(struct c2wr_rdma_write_req) +
			    (sizeof(struct c2_data_addr) * ib_wr->num_sge);
			if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			wr.sqwr.rdma_write.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_write.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			err = move_sgl((struct c2_data_addr *)
				       & (wr.sqwr.rdma_write.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_READ:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
			msg_size = sizeof(struct c2wr_rdma_read_req);

			/* iWARP only supports 1 SGE for RDMA reads */
			if (ib_wr->num_sge > 1) {
				err = -EINVAL;
				break;
			}

			/*
			 * Move the local and remote stag/to/len into the WR.
			 */
			wr.sqwr.rdma_read.local_stag =
			    cpu_to_be32(ib_wr->sg_list->lkey);
			wr.sqwr.rdma_read.local_to =
			    cpu_to_be64(ib_wr->sg_list->addr);
			wr.sqwr.rdma_read.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_read.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			wr.sqwr.rdma_read.length =
			    cpu_to_be32(ib_wr->sg_list->length);
			break;
		default:
			/* error */
			msg_size = 0;
			err = -EINVAL;
			break;
		}

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include bogus WR
		 * type, and a bogus SGL length...
		 */
		if (err) {
			break;
		}

		/*
		 * Store flags
		 */
		c2_wr_set_flags(&wr, flags);

		/*
		 * Post the puppy!
		 */
		spin_lock_irqsave(&qp->lock, lock_flags);
		err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
		if (err) {
			spin_unlock_irqrestore(&qp->lock, lock_flags);
			break;
		}

		/*
		 * Enqueue mq index to activity FIFO.
		 */
		c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
		spin_unlock_irqrestore(&qp->lock, lock_flags);

		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;
	return err;
}
Example #2
static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode);
	struct gfs2_holder gh;
	struct buffer_head *bh;
	struct gfs2_inum_range_host ir;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		return error;

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out;
	mutex_lock(&sdp->sd_inum_mutex);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_end_trans;

	gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode));

	if (!ir.ir_length) {
		struct buffer_head *m_bh;
		u64 x, y;
		__be64 z;

		error = gfs2_meta_inode_buffer(m_ip, &m_bh);
		if (error)
			goto out_brelse;

		z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode));
		x = y = be64_to_cpu(z);
		ir.ir_start = x;
		ir.ir_length = GFS2_INUM_QUANTUM;
		x += GFS2_INUM_QUANTUM;
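		/* If x wrapped around past y, the on-disk inum counter has
		 * overflowed; flag the inode as inconsistent. */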
		if (x < y)
			gfs2_consist_inode(m_ip);
		z = cpu_to_be64(x);
		gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1);
		*(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z;

		brelse(m_bh);
	}

	*formal_ino = ir.ir_start++;
	ir.ir_length--;

	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode));

out_brelse:
	brelse(bh);
out_end_trans:
	mutex_unlock(&sdp->sd_inum_mutex);
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
	return error;
}
Example #3
int c2_init_cq(struct c2_dev *c2dev, int entries,
	       struct c2_ucontext *ctx, struct c2_cq *cq)
{
	struct c2wr_cq_create_req wr;
	struct c2wr_cq_create_rep *reply;
	unsigned long peer_pa;
	struct c2_vq_req *vq_req;
	int err;

	might_sleep();

	cq->ibcq.cqe = entries - 1;
	cq->is_kernel = !ctx;

	/* Allocate a shared pointer */
	cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
				      &cq->mq.shared_dma, GFP_KERNEL);
	if (!cq->mq.shared)
		return -ENOMEM;

	/* Allocate pages for the message pool */
	err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
	if (err)
		goto bail0;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req) {
		err = -ENOMEM;
		goto bail1;
	}

	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_CQ_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.msg_size = cpu_to_be32(cq->mq.msg_size);
	wr.depth = cpu_to_be32(cq->mq.q_size);
	wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
	wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
	wr.user_context = (u64) (unsigned long) (cq);

	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) & wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail2;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail2;

	reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail2;
	}

	if ((err = c2_errno(reply)) != 0)
		goto bail3;

	cq->adapter_handle = reply->cq_handle;
	cq->mq.index = be32_to_cpu(reply->mq_index);

	peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
	cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
	if (!cq->mq.peer) {
		err = -ENOMEM;
		goto bail3;
	}

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	/*
	 * Use the MQ index allocated by the adapter to
	 * store the CQ in the qptr_array
	 */
	cq->cqn = cq->mq.index;
	c2dev->qptr_array[cq->cqn] = cq;

	return 0;

bail3:
	vq_repbuf_free(c2dev, reply);
bail2:
	vq_req_free(c2dev, vq_req);
bail1:
	c2_free_cq_buf(c2dev, &cq->mq);
bail0:
	c2_free_mqsp(cq->mq.shared);

	return err;
}
Example #4
/**
 * mpi_write_to_sgl() - Function to export an MPI to an sgl (msb first)
 *
 * This function works in the same way as mpi_read_buffer(), but it
 * takes an sgl instead of a u8 * buf.
 *
 * @a:		a multi precision integer
 * @sgl:	scatterlist to write to. Needs to be at least
 *		mpi_get_size(a) long.
 * @nbytes:	in/out param - it has to be set to the maximum number of
 *		bytes that can be written to the sgl. This has to be at least
 *		the size of the integer a. On return it receives the actual
 *		length of the data written on success, or the length that
 *		would have been written if the buffer was too small.
 * @sign:	if not NULL, it will be set to the sign of a.
 *
 * Return:	0 on success or error code in case of error
 */
int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
		     int *sign)
{
	u8 *p, *p2;
#if BYTES_PER_MPI_LIMB == 4
	__be32 alimb;
#elif BYTES_PER_MPI_LIMB == 8
	__be64 alimb;
#else
#error please implement for this limb size.
#endif
	unsigned int n = mpi_get_size(a);
	int i, x, y = 0, lzeros, buf_len;

	if (!nbytes)
		return -EINVAL;

	if (sign)
		*sign = a->sign;

	lzeros = count_lzeros(a);

	if (*nbytes < n - lzeros) {
		*nbytes = n - lzeros;
		return -EOVERFLOW;
	}

	*nbytes = n - lzeros;
	buf_len = sgl->length;
	p2 = sg_virt(sgl);

	for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
			lzeros %= BYTES_PER_MPI_LIMB;
		i >= 0; i--) {
#if BYTES_PER_MPI_LIMB == 4
		alimb = cpu_to_be32(a->d[i]);
#elif BYTES_PER_MPI_LIMB == 8
		alimb = cpu_to_be64(a->d[i]);
#else
#error please implement for this limb size.
#endif
		if (lzeros) {
			y = lzeros;
			lzeros = 0;
		}

		p = (u8 *)&alimb + y;

		for (x = 0; x < sizeof(alimb) - y; x++) {
			if (!buf_len) {
				sgl = sg_next(sgl);
				if (!sgl)
					return -EINVAL;
				buf_len = sgl->length;
				p2 = sg_virt(sgl);
			}
			*p2++ = *p++;
			buf_len--;
		}
		y = 0;
	}
	return 0;
}
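A hypothetical caller sketch (not part of the example above): per the kernel-doc, the scatterlist must cover at least mpi_get_size(a) bytes, and *nbytes carries the buffer capacity on entry and the written length on return. The function and buffer names below are illustrative only.

static int export_mpi_sketch(MPI a, u8 *buf, unsigned int buflen)
{
	struct scatterlist sg;
	unsigned int nbytes = buflen;	/* in: capacity, out: bytes written */
	int sign;

	/* Single-entry scatterlist backed by a linear buffer. */
	sg_init_one(&sg, buf, buflen);
	return mpi_write_to_sgl(a, &sg, &nbytes, &sign);
}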
Example #5
static void s390x_write_elf64_todcmp(Note *note, S390CPU *cpu)
{
    note->hdr.n_type = cpu_to_be32(NT_S390_TODCMP);
    note->contents.todcmp = cpu_to_be64((uint64_t)(cpu->env.ckc));
}
Example #6
int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
{
	int written = 0;
	__be64 olen;
	s64 len, rc;
	unsigned long flags;
	__be64 evt;

	if (!opal.entry)
		return -ENODEV;

	/* We want put_chars to be atomic to avoid mangling of hvsi
	 * packets. To do that, we first test for room and return
	 * -EAGAIN if there isn't enough.
	 *
	 * Unfortunately, opal_console_write_buffer_space() doesn't
	 * appear to work on opal v1, so we just assume there is
	 * enough room and be done with it
	 */
	spin_lock_irqsave(&opal_write_lock, flags);
	if (firmware_has_feature(FW_FEATURE_OPALv2)) {
		rc = opal_console_write_buffer_space(vtermno, &olen);
		len = be64_to_cpu(olen);
		if (rc || len < total_len) {
			spin_unlock_irqrestore(&opal_write_lock, flags);
			/* Closed -> drop characters */
			if (rc)
				return total_len;
			opal_poll_events(NULL);
			return -EAGAIN;
		}
	}

	/* We still try to handle partial completions, though they
	 * should no longer happen.
	 */
	rc = OPAL_BUSY;
	while(total_len > 0 && (rc == OPAL_BUSY ||
				rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
		olen = cpu_to_be64(total_len);
		rc = opal_console_write(vtermno, &olen, data);
		len = be64_to_cpu(olen);

		/* Closed or other error drop */
		if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
		    rc != OPAL_BUSY_EVENT) {
			written = total_len;
			break;
		}
		if (rc == OPAL_SUCCESS) {
			total_len -= len;
			data += len;
			written += len;
		}
		/* This is a bit nasty but we need that for the console to
		 * flush when there aren't any interrupts. We will clean
		 * things a bit later to limit that to synchronous path
		 * such as the kernel console and xmon/udbg
		 */
		do
			opal_poll_events(&evt);
		while(rc == OPAL_SUCCESS &&
			(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT));
	}
	spin_unlock_irqrestore(&opal_write_lock, flags);
	return written;
}
Example #7
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix)
		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
			     htonl(indev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we need
			 * to look for the bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(br_port_get_rcu(indev)->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physindev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					     htonl(skb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
			     htonl(outdev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
		} else {
			/* Case 2: outdev is a bridge group, we need to look
			 * for the physical output device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					     htonl(skb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (skb->mark)
		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
			     htons(skb->dev->hard_header_len));
		NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
			skb_mac_header(skb));
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			struct file *file = skb->sk->sk_socket->file;
			__be32 uid = htonl(file->f_cred->fsuid);
			__be32 gid = htonl(file->f_cred->fsgid);
			/* need to unlock here since NLA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));

	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
			     htonl(atomic_inc_return(&global_seq)));

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
Example #8
static int mvs_64xx_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	if (mvi->pdev && mvi->pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;
	if (!(mvi->flags & MVF_FLAG_SOC)) {
		mvs_show_pcie_usage(mvi);
		tmp = mvs_64xx_chip_reset(mvi);
		if (tmp)
			return tmp;
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_PHY_DSBL;
		mw32(MVS_PHY_CTL, tmp);
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(MVS_CTL) & 0xFFFF;
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(MVS_CTL, cctl | CCTL_RST);

	if (!(mvi->flags & MVF_FLAG_SOC)) {
		/* write to device control _AND_ device status register */
		pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
		tmp &= ~PRD_REQ_MASK;
		tmp |= PRD_REQ_SIZE;
		pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
		tmp &= ~PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

		pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
		tmp &= PCTL_PWR_OFF;
		tmp &= ~PCTL_PHY_DSBL;
		pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
	} else {
		tmp = mr32(MVS_PHY_CTL);
		tmp &= ~PCTL_PWR_OFF;
		tmp |= PCTL_COM_ON;
		tmp &= ~PCTL_PHY_DSBL;
		tmp |= PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
		tmp &= ~PCTL_LINK_RST;
		mw32(MVS_PHY_CTL, tmp);
		msleep(100);
	}

	/* reset control */
	mw32(MVS_PCS, 0);		/* MVS_PCS */
	/* init phys */
	mvs_64xx_phy_hacks(mvi);

	tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);

	/* enable auto port detection */
	mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);

	mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
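	/* The (x >> 16) >> 16 idiom used for the *_HI registers below extracts
	 * the upper 32 bits without an undefined 32-bit shift when dma_addr_t
	 * is only 32 bits wide. */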
	mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
	mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(MVS_TX_LO, mvi->tx_dma);
	mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
	mw32(MVS_RX_LO, mvi->rx_dma);
	mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* set phy local SAS address */
		/* should set little endian SAS address to 64xx chip */
		mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
				cpu_to_be64(mvi->phy[i].dev_sas_addr));

		mvs_64xx_enable_xmt(mvi, i);

		mvs_64xx_phy_reset(mvi, i, MVS_HARD_RESET);
		msleep(500);
		mvs_64xx_detect_porttype(mvi, i);
	}
	if (mvi->flags & MVF_FLAG_SOC) {
		/* set select registers */
		writel(0x0E008000, regs + 0x000);
		writel(0x59000008, regs + 0x004);
		writel(0x20, regs + 0x008);
		writel(0x20, regs + 0x00c);
		writel(0x20, regs + 0x010);
		writel(0x20, regs + 0x014);
		writel(0x20, regs + 0x018);
		writel(0x20, regs + 0x01c);
	}
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
			PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
	}

	/* little endian for open address and command table, etc. */
	cctl = mr32(MVS_CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(MVS_CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(MVS_PCS);
	tmp |= PCS_CMD_RST;
	tmp &= ~PCS_SELF_CLEAR;
	mw32(MVS_PCS, tmp);
	/*
	 * the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	if (MVS_CHIP_SLOT_SZ > 0x1ff)
		mw32(MVS_INT_COAL, 0x1ff | COAL_EN);
	else
		mw32(MVS_INT_COAL, MVS_CHIP_SLOT_SZ | COAL_EN);

	tmp = 0x10000 | interrupt_coalescing;
	mw32(MVS_INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(MVS_TX_CFG, 0);
	mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
		PCS_CMD_EN | PCS_CMD_STOP_ERR);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
		CINT_DMA_PCIE);

	mw32(MVS_INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(MVS_INT_MASK_SRS_0, 0xFFFF);

	return 0;
}
Example #9
static inline void u128_to_be128(be128 *dst, const u128 *src)
{
	dst->a = cpu_to_be64(src->a);
	dst->b = cpu_to_be64(src->b);
}
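The inverse conversion is symmetric; a minimal sketch, assuming the same u128/be128 layout with 64-bit halves a and b as above:

static inline void be128_to_u128(u128 *dst, const be128 *src)
{
	dst->a = be64_to_cpu(src->a);
	dst->b = be64_to_cpu(src->b);
}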
Example #10
/*
 * Add an entry to a block directory.
 */
int						/* error */
xfs_dir2_block_addname(
	xfs_da_args_t		*args)		/* directory op arguments */
{
	xfs_dir2_data_hdr_t	*hdr;		/* block header */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	struct xfs_buf		*bp;		/* buffer for block */
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	int			compact;	/* need to compact leaf ents */
	xfs_dir2_data_entry_t	*dep;		/* block data entry */
	xfs_inode_t		*dp;		/* directory inode */
	xfs_dir2_data_unused_t	*dup;		/* block unused entry */
	int			error;		/* error return value */
	xfs_dir2_data_unused_t	*enddup=NULL;	/* unused at end of data */
	xfs_dahash_t		hash;		/* hash value of found entry */
	int			high;		/* high index for binary srch */
	int			highstale;	/* high stale index */
	int			lfloghigh=0;	/* last final leaf to log */
	int			lfloglow=0;	/* first final leaf to log */
	int			len;		/* length of the new entry */
	int			low;		/* low index for binary srch */
	int			lowstale;	/* low stale index */
	int			mid=0;		/* midpoint for binary srch */
	int			needlog;	/* need to log header */
	int			needscan;	/* need to rescan freespace */
	__be16			*tagp;		/* pointer to tag value */
	xfs_trans_t		*tp;		/* transaction structure */

	trace_xfs_dir2_block_addname(args);

	dp = args->dp;
	tp = args->trans;

	/* Read the (one and only) directory block into bp. */
	error = xfs_dir3_block_read(tp, dp, &bp);
	if (error)
		return error;

	len = dp->d_ops->data_entsize(args->namelen);

	/*
	 * Set up pointers to parts of the block.
	 */
	hdr = bp->b_addr;
	btp = xfs_dir2_block_tail_p(args->geo, hdr);
	blp = xfs_dir2_block_leaf_p(btp);

	/*
	 * Find out if we can reuse stale entries or whether we need extra
	 * space for entry and new leaf.
	 */
	xfs_dir2_block_need_space(dp, hdr, btp, blp, &tagp, &dup,
				  &enddup, &compact, len);

	/*
	 * Done everything we need for a space check now.
	 */
	if (args->op_flags & XFS_DA_OP_JUSTCHECK) {
		xfs_trans_brelse(tp, bp);
		if (!dup)
			return -ENOSPC;
		return 0;
	}

	/*
	 * If we don't have space for the new entry & leaf ...
	 */
	if (!dup) {
		/* Don't have a space reservation: return no-space.  */
		if (args->total == 0)
			return -ENOSPC;
		/*
		 * Convert to the next larger format.
		 * Then add the new entry in that format.
		 */
		error = xfs_dir2_block_to_leaf(args, bp);
		if (error)
			return error;
		return xfs_dir2_leaf_addname(args);
	}

	needlog = needscan = 0;

	/*
	 * If need to compact the leaf entries, do it now.
	 */
	if (compact) {
		xfs_dir2_block_compact(args, bp, hdr, btp, blp, &needlog,
				      &lfloghigh, &lfloglow);
		/* recalculate blp post-compaction */
		blp = xfs_dir2_block_leaf_p(btp);
	} else if (btp->stale) {
		/*
		 * Set leaf logging boundaries to impossible state.
		 * For the no-stale case they're set explicitly.
		 */
		lfloglow = be32_to_cpu(btp->count);
		lfloghigh = -1;
	}

	/*
	 * Find the slot that's first lower than our hash value, -1 if none.
	 */
	for (low = 0, high = be32_to_cpu(btp->count) - 1; low <= high; ) {
		mid = (low + high) >> 1;
		if ((hash = be32_to_cpu(blp[mid].hashval)) == args->hashval)
			break;
		if (hash < args->hashval)
			low = mid + 1;
		else
			high = mid - 1;
	}
	while (mid >= 0 && be32_to_cpu(blp[mid].hashval) >= args->hashval) {
		mid--;
	}
	/*
	 * No stale entries, will use enddup space to hold new leaf.
	 */
	if (!btp->stale) {
		/*
		 * Mark the space needed for the new leaf entry, now in use.
		 */
		xfs_dir2_data_use_free(args, bp, enddup,
			(xfs_dir2_data_aoff_t)
			((char *)enddup - (char *)hdr + be16_to_cpu(enddup->length) -
			 sizeof(*blp)),
			(xfs_dir2_data_aoff_t)sizeof(*blp),
			&needlog, &needscan);
		/*
		 * Update the tail (entry count).
		 */
		be32_add_cpu(&btp->count, 1);
		/*
		 * If we now need to rebuild the bestfree map, do so.
		 * This needs to happen before the next call to use_free.
		 */
		if (needscan) {
			xfs_dir2_data_freescan(dp, hdr, &needlog);
			needscan = 0;
		}
		/*
		 * Adjust pointer to the first leaf entry, we're about to move
		 * the table up one to open up space for the new leaf entry.
		 * Then adjust our index to match.
		 */
		blp--;
		mid++;
		if (mid)
			memmove(blp, &blp[1], mid * sizeof(*blp));
		lfloglow = 0;
		lfloghigh = mid;
	}
	/*
	 * Use a stale leaf for our new entry.
	 */
	else {
		for (lowstale = mid;
		     lowstale >= 0 &&
			blp[lowstale].address !=
			cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
		     lowstale--)
			continue;
		for (highstale = mid + 1;
		     highstale < be32_to_cpu(btp->count) &&
			blp[highstale].address !=
			cpu_to_be32(XFS_DIR2_NULL_DATAPTR) &&
			(lowstale < 0 || mid - lowstale > highstale - mid);
		     highstale++)
			continue;
		/*
		 * Move entries toward the low-numbered stale entry.
		 */
		if (lowstale >= 0 &&
		    (highstale == be32_to_cpu(btp->count) ||
		     mid - lowstale <= highstale - mid)) {
			if (mid - lowstale)
				memmove(&blp[lowstale], &blp[lowstale + 1],
					(mid - lowstale) * sizeof(*blp));
			lfloglow = MIN(lowstale, lfloglow);
			lfloghigh = MAX(mid, lfloghigh);
		}
		/*
		 * Move entries toward the high-numbered stale entry.
		 */
		else {
			ASSERT(highstale < be32_to_cpu(btp->count));
			mid++;
			if (highstale - mid)
				memmove(&blp[mid + 1], &blp[mid],
					(highstale - mid) * sizeof(*blp));
			lfloglow = MIN(mid, lfloglow);
			lfloghigh = MAX(highstale, lfloghigh);
		}
		be32_add_cpu(&btp->stale, -1);
	}
	/*
	 * Point to the new data entry.
	 */
	dep = (xfs_dir2_data_entry_t *)dup;
	/*
	 * Fill in the leaf entry.
	 */
	blp[mid].hashval = cpu_to_be32(args->hashval);
	blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
				(char *)dep - (char *)hdr));
	xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
	/*
	 * Mark space for the data entry used.
	 */
	xfs_dir2_data_use_free(args, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
		(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
	/*
	 * Create the new data entry.
	 */
	dep->inumber = cpu_to_be64(args->inumber);
	dep->namelen = args->namelen;
	memcpy(dep->name, args->name, args->namelen);
	dp->d_ops->data_put_ftype(dep, args->filetype);
	tagp = dp->d_ops->data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)hdr);
	/*
	 * Clean up the bestfree array and log the header, tail, and entry.
	 */
	if (needscan)
		xfs_dir2_data_freescan(dp, hdr, &needlog);
	if (needlog)
		xfs_dir2_data_log_header(args, bp);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir2_data_log_entry(args, bp, dep);
	xfs_dir3_data_check(dp, bp);
	return 0;
}
Example #11
static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
			   struct buffer_head *bh_map, struct metapath *mp,
			   const unsigned int sheight,
			   const unsigned int height,
			   const unsigned int maxlen)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn, dblock = 0;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = height - 1;
	int eob = 0;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(sheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (height == sheight) {
		struct buffer_head *bh;
		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
					   &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
		dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
		if (height == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = height - sheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = height - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (height - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = sheight;
	do {
		int error;
		n = blks - alloced;
		error = gfs2_alloc_block(ip, &bn, &n);
		if (error)
			return error;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == height - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for(i = branch_start; i < height; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < height)
				gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1);
			for (; i < height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == height)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			dblock = bn;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			break;
		}
	} while ((state != ALLOC_DATA) || !dblock);

	ip->i_height = height;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	map_bh(bh_map, inode->i_sb, dblock);
	bh_map->b_size = dblks << inode->i_blkbits;
	set_buffer_new(bh_map);
	return 0;
}
Example #12
/*
 * Convert the shortform directory to block form.
 */
int						/* error */
xfs_dir2_sf_to_block(
	xfs_da_args_t		*args)		/* operation arguments */
{
	xfs_dir2_db_t		blkno;		/* dir-relative block # (0) */
	xfs_dir2_data_hdr_t	*hdr;		/* block header */
	xfs_dir2_leaf_entry_t	*blp;		/* block leaf entries */
	struct xfs_buf		*bp;		/* block buffer */
	xfs_dir2_block_tail_t	*btp;		/* block tail pointer */
	xfs_dir2_data_entry_t	*dep;		/* data entry pointer */
	xfs_inode_t		*dp;		/* incore directory inode */
	int			dummy;		/* trash */
	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
	int			endoffset;	/* end of data objects */
	int			error;		/* error return value */
	int			i;		/* index */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needlog;	/* need to log block header */
	int			needscan;	/* need to scan block freespc */
	int			newoffset;	/* offset from current entry */
	int			offset;		/* target block offset */
	xfs_dir2_sf_entry_t	*sfep;		/* sf entry pointer */
	xfs_dir2_sf_hdr_t	*oldsfp;	/* old shortform header  */
	xfs_dir2_sf_hdr_t	*sfp;		/* shortform header  */
	__be16			*tagp;		/* end of data entry */
	xfs_trans_t		*tp;		/* transaction pointer */
	struct xfs_name		name;
	struct xfs_ifork	*ifp;

	trace_xfs_dir2_sf_to_block(args);

	dp = args->dp;
	tp = args->trans;
	mp = dp->i_mount;
	ifp = XFS_IFORK_PTR(dp, XFS_DATA_FORK);
	ASSERT(ifp->if_flags & XFS_IFINLINE);
	/*
	 * Bomb out if the shortform directory is way too short.
	 */
	if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) {
		ASSERT(XFS_FORCED_SHUTDOWN(mp));
		return -EIO;
	}

	oldsfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data;

	ASSERT(ifp->if_bytes == dp->i_d.di_size);
	ASSERT(ifp->if_u1.if_data != NULL);
	ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count));
	ASSERT(dp->i_d.di_nextents == 0);

	/*
	 * Copy the directory into a temporary buffer.
	 * Then pitch the incore inode data so we can make extents.
	 */
	sfp = kmem_alloc(ifp->if_bytes, KM_SLEEP);
	memcpy(sfp, oldsfp, ifp->if_bytes);

	xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
	xfs_bmap_local_to_extents_empty(dp, XFS_DATA_FORK);
	dp->i_d.di_size = 0;

	/*
	 * Add block 0 to the inode.
	 */
	error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
	if (error) {
		kmem_free(sfp);
		return error;
	}
	/*
	 * Initialize the data block, then convert it to block format.
	 */
	error = xfs_dir3_data_init(args, blkno, &bp);
	if (error) {
		kmem_free(sfp);
		return error;
	}
	xfs_dir3_block_init(mp, tp, bp, dp);
	hdr = bp->b_addr;

	/*
	 * Compute size of block "tail" area.
	 */
	i = (uint)sizeof(*btp) +
	    (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
	/*
	 * The whole thing is initialized to free by the init routine.
	 * Say we're using the leaf and tail area.
	 */
	dup = dp->d_ops->data_unused_p(hdr);
	needlog = needscan = 0;
	xfs_dir2_data_use_free(args, bp, dup, args->geo->blksize - i,
			       i, &needlog, &needscan);
	ASSERT(needscan == 0);
	/*
	 * Fill in the tail.
	 */
	btp = xfs_dir2_block_tail_p(args->geo, hdr);
	btp->count = cpu_to_be32(sfp->count + 2);	/* ., .. */
	btp->stale = 0;
	blp = xfs_dir2_block_leaf_p(btp);
	endoffset = (uint)((char *)blp - (char *)hdr);
	/*
	 * Remove the freespace, we'll manage it.
	 */
	xfs_dir2_data_use_free(args, bp, dup,
		(xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
		be16_to_cpu(dup->length), &needlog, &needscan);
	/*
	 * Create entry for .
	 */
	dep = dp->d_ops->data_dot_entry_p(hdr);
	dep->inumber = cpu_to_be64(dp->i_ino);
	dep->namelen = 1;
	dep->name[0] = '.';
	dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
	tagp = dp->d_ops->data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)hdr);
	xfs_dir2_data_log_entry(args, bp, dep);
	blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
	blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
				(char *)dep - (char *)hdr));
	/*
	 * Create entry for ..
	 */
	dep = dp->d_ops->data_dotdot_entry_p(hdr);
	dep->inumber = cpu_to_be64(dp->d_ops->sf_get_parent_ino(sfp));
	dep->namelen = 2;
	dep->name[0] = dep->name[1] = '.';
	dp->d_ops->data_put_ftype(dep, XFS_DIR3_FT_DIR);
	tagp = dp->d_ops->data_entry_tag_p(dep);
	*tagp = cpu_to_be16((char *)dep - (char *)hdr);
	xfs_dir2_data_log_entry(args, bp, dep);
	blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
	blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
				(char *)dep - (char *)hdr));
	offset = dp->d_ops->data_first_offset;
	/*
	 * Loop over existing entries, stuff them in.
	 */
	i = 0;
	if (!sfp->count)
		sfep = NULL;
	else
		sfep = xfs_dir2_sf_firstentry(sfp);
	/*
	 * Need to preserve the existing offset values in the sf directory.
	 * Insert holes (unused entries) where necessary.
	 */
	while (offset < endoffset) {
		/*
		 * sfep is null when we reach the end of the list.
		 */
		if (sfep == NULL)
			newoffset = endoffset;
		else
			newoffset = xfs_dir2_sf_get_offset(sfep);
		/*
		 * There should be a hole here, make one.
		 */
		if (offset < newoffset) {
			dup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
			dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
			dup->length = cpu_to_be16(newoffset - offset);
			*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
				((char *)dup - (char *)hdr));
			xfs_dir2_data_log_unused(args, bp, dup);
			xfs_dir2_data_freeinsert(hdr,
						 dp->d_ops->data_bestfree_p(hdr),
						 dup, &dummy);
			offset += be16_to_cpu(dup->length);
			continue;
		}
		/*
		 * Copy a real entry.
		 */
		dep = (xfs_dir2_data_entry_t *)((char *)hdr + newoffset);
		dep->inumber = cpu_to_be64(dp->d_ops->sf_get_ino(sfp, sfep));
		dep->namelen = sfep->namelen;
		dp->d_ops->data_put_ftype(dep, dp->d_ops->sf_get_ftype(sfep));
		memcpy(dep->name, sfep->name, dep->namelen);
		tagp = dp->d_ops->data_entry_tag_p(dep);
		*tagp = cpu_to_be16((char *)dep - (char *)hdr);
		xfs_dir2_data_log_entry(args, bp, dep);
		name.name = sfep->name;
		name.len = sfep->namelen;
		blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
							hashname(&name));
		blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(
						 (char *)dep - (char *)hdr));
		offset = (int)((char *)(tagp + 1) - (char *)hdr);
		if (++i == sfp->count)
			sfep = NULL;
		else
			sfep = dp->d_ops->sf_nextentry(sfp, sfep);
	}
	/* Done with the temporary buffer */
	kmem_free(sfp);
	/*
	 * Sort the leaf entries by hash value.
	 */
	xfs_sort(blp, be32_to_cpu(btp->count), sizeof(*blp), xfs_dir2_block_sort);
	/*
	 * Log the leaf entry area and tail.
	 * Already logged the header in data_init, ignore needlog.
	 */
	ASSERT(needscan == 0);
	xfs_dir2_block_log_leaf(tp, bp, 0, be32_to_cpu(btp->count) - 1);
	xfs_dir2_block_log_tail(tp, bp);
	xfs_dir3_data_check(dp, bp);
	return 0;
}
Example #13
int main(int argc, char* argv[])
{
	int fdin, fdout;
	void *container = malloc(SECURE_BOOT_HEADERS_SIZE);
	struct stat s;
	char *buf = malloc(4096);
	off_t l;
	void *infile;
	int r;
	ROM_container_raw *c = (ROM_container_raw*)container;
	ROM_prefix_header_raw *ph;
	ROM_prefix_data_raw *pd;
	ROM_sw_header_raw *swh;
	ROM_sw_sig_raw *ssig;

	unsigned char md[SHA512_DIGEST_LENGTH];
	void *p;
	ecc_key_t pubkeyraw;
	ecc_signature_t sigraw;
	int indexptr;

	progname = strrchr (argv[0], '/');
	if (progname != NULL)
		++progname;
	else
		progname = argv[0];

	memset(container, 0, SECURE_BOOT_HEADERS_SIZE);

	while (1) {
		int opt;
		opt = getopt_long(argc, argv, "a:b:c:p:q:r:A:B:C:P:Q:R:L:I:dh", opts, &indexptr);
		if (opt == -1)
			break;

		switch (opt) {
		case 'h':
		case '?':
			usage(EX_OK);
			break;
		case 'd':
			debug = 1;
			break;
		case 'a':
			params.hw_keyfn_a = optarg;
			break;
		case 'b':
			params.hw_keyfn_b = optarg;
			break;
		case 'c':
			params.hw_keyfn_c = optarg;
			break;
		case 'p':
			params.sw_keyfn_p = optarg;
			break;
		case 'q':
			params.sw_keyfn_q = optarg;
			break;
		case 'r':
			params.sw_keyfn_r = optarg;
			break;
		case 'A':
			params.hw_sigfn_a = optarg;
			break;
		case 'B':
			params.hw_sigfn_b = optarg;
			break;
		case 'C':
			params.hw_sigfn_c = optarg;
			break;
		case 'P':
			params.sw_sigfn_p = optarg;
			break;
		case 'Q':
			params.sw_sigfn_q = optarg;
			break;
		case 'R':
			params.sw_sigfn_r = optarg;
			break;
		case 'L':
			params.payloadfn = optarg;
			break;
		case 'I':
			params.imagefn = optarg;
			break;
		case 128:
			params.prhdrfn = optarg;
			break;
		case 129:
			params.swhdrfn = optarg;
			break;
		default:
			usage(EX_USAGE);
		}
	}
//	}

	fdin = open(params.payloadfn, O_RDONLY);
	assert(fdin > 0);
	r = fstat(fdin, &s);
	assert(r==0);
	infile = mmap(NULL, s.st_size, PROT_READ, MAP_PRIVATE, fdin, 0);
	assert(infile);
	fdout = open(params.imagefn, O_WRONLY|O_CREAT|O_TRUNC, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
	assert(fdout > 0);

	c->magic_number = cpu_to_be32(ROM_MAGIC_NUMBER);
	c->version = cpu_to_be16(1);
	c->container_size = cpu_to_be64(SECURE_BOOT_HEADERS_SIZE + s.st_size);
	c->target_hrmor = 0;
	c->stack_pointer = 0;
	memset(c->hw_pkey_a, 0, sizeof(ecc_key_t));
	memset(c->hw_pkey_b, 0, sizeof(ecc_key_t));
	memset(c->hw_pkey_c, 0, sizeof(ecc_key_t));
	if (params.hw_keyfn_a) {
		getPublicKeyRaw(&pubkeyraw, params.hw_keyfn_a);
		memcpy(c->hw_pkey_a, pubkeyraw, sizeof(ecc_key_t));
	}
	if (params.hw_keyfn_b) {
		getPublicKeyRaw(&pubkeyraw, params.hw_keyfn_b);
		memcpy(c->hw_pkey_b, pubkeyraw, sizeof(ecc_key_t));
	}
	if (params.hw_keyfn_c) {
		getPublicKeyRaw(&pubkeyraw, params.hw_keyfn_c);
		memcpy(c->hw_pkey_c, pubkeyraw, sizeof(ecc_key_t));
	}

	ph = container + sizeof(ROM_container_raw);
	ph->ver_alg.version = cpu_to_be16(1);
	ph->ver_alg.hash_alg = 1;
	ph->ver_alg.sig_alg = 1;
	ph->code_start_offset = 0;
	ph->reserved = 0;
	ph->flags = cpu_to_be32(0x80000000);
	memset(ph->payload_hash, 0, sizeof(sha2_hash_t));
	ph->ecid_count = 0;

	pd = (ROM_prefix_data_raw*)ph->ecid;
	memset(pd->hw_sig_a, 0, sizeof(ecc_signature_t));
	memset(pd->hw_sig_b, 0, sizeof(ecc_signature_t));
	memset(pd->hw_sig_c, 0, sizeof(ecc_signature_t));
	if (params.hw_sigfn_a) {
		getSigRaw(&sigraw, params.hw_sigfn_a);
		memcpy(pd->hw_sig_a, sigraw, sizeof(ecc_key_t));
	}
	if (params.hw_sigfn_b) {
		getSigRaw(&sigraw, params.hw_sigfn_b);
		memcpy(pd->hw_sig_b, sigraw, sizeof(ecc_key_t));
	}
	if (params.hw_sigfn_c) {
		getSigRaw(&sigraw, params.hw_sigfn_c);
		memcpy(pd->hw_sig_c, sigraw, sizeof(ecc_key_t));
	}
	memset(pd->sw_pkey_p, 0, sizeof(ecc_key_t));
	memset(pd->sw_pkey_q, 0, sizeof(ecc_key_t));
	memset(pd->sw_pkey_r, 0, sizeof(ecc_key_t));
	if (params.sw_keyfn_p) {
		getPublicKeyRaw(&pubkeyraw, params.sw_keyfn_p);
		memcpy(pd->sw_pkey_p, pubkeyraw, sizeof(ecc_key_t));
		ph->sw_key_count++;
	}
	if (params.sw_keyfn_q) {
		getPublicKeyRaw(&pubkeyraw, params.sw_keyfn_q);
		memcpy(pd->sw_pkey_q, pubkeyraw, sizeof(ecc_key_t));
		ph->sw_key_count++;
	}
	if (params.sw_keyfn_r) {
		getPublicKeyRaw(&pubkeyraw, params.sw_keyfn_r);
		memcpy(pd->sw_pkey_r, pubkeyraw, sizeof(ecc_key_t));
		ph->sw_key_count++;
	}
	ph->payload_size = cpu_to_be64(ph->sw_key_count * sizeof(ecc_key_t));
	p = SHA512(pd->sw_pkey_p, sizeof(ecc_key_t) * ph->sw_key_count, md);
	assert(p);
	memcpy(ph->payload_hash, md, sizeof(sha2_hash_t));

	if (params.prhdrfn)
		writeHdr((void *)ph, params.prhdrfn, PREFIX_HDR);

	swh = (ROM_sw_header_raw*)(((uint8_t*)pd) + sizeof(ecc_signature_t)*3 + be64_to_cpu(ph->payload_size));
	swh->ver_alg.version = cpu_to_be16(1);
	swh->ver_alg.hash_alg = 1;
	swh->ver_alg.sig_alg = 1;
	swh->code_start_offset = 0;
	swh->reserved = 0;
	swh->flags = 0;
	swh->reserved_0 = 0;
	swh->payload_size = cpu_to_be64(s.st_size);
	p = SHA512(infile, s.st_size, md);
	assert(p);
	memcpy(swh->payload_hash, md, sizeof(sha2_hash_t));

	if (params.swhdrfn)
		writeHdr((void *)swh, params.swhdrfn, SOFTWARE_HDR);

	ssig = (ROM_sw_sig_raw*)(((uint8_t*)swh) + sizeof(ROM_sw_header_raw));
	memset(ssig->sw_sig_p, 0, sizeof(ecc_signature_t));
	memset(ssig->sw_sig_q, 0, sizeof(ecc_signature_t));
	memset(ssig->sw_sig_r, 0, sizeof(ecc_signature_t));
	if (params.sw_sigfn_p) {
		getSigRaw(&sigraw, params.sw_sigfn_p);
		memcpy(ssig->sw_sig_p, sigraw, sizeof(ecc_key_t));
	}
	if (params.sw_sigfn_q) {
		getSigRaw(&sigraw, params.sw_sigfn_q);
		memcpy(ssig->sw_sig_q, sigraw, sizeof(ecc_key_t));
	}
	if (params.sw_sigfn_r) {
		getSigRaw(&sigraw, params.sw_sigfn_r);
		memcpy(ssig->sw_sig_r, sigraw, sizeof(ecc_key_t));
	}

	r = write(fdout, container, SECURE_BOOT_HEADERS_SIZE);
	assert(r == 4096);
	read(fdin, buf, s.st_size%4096);
	write(fdout, buf, s.st_size%4096);
	l = s.st_size - s.st_size%4096;
	while (l) {
		read(fdin, buf, 4096);
		write(fdout, buf, 4096);
		l-=4096;
	};
	close(fdin);
	close(fdout);

	free(container);
	free(buf);
	return 0;
}
Example #14
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);

		error = -EIO;
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot..
	 */
	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			   XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EIO;
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
					&dqp->q_logitem.qli_item.li_lsn);

	/*
	 * copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
				  &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return -EIO;
}
Example #15
/*
 * Add new blocks to the current extent, or create new entries/continuations
 * as necessary.
 */
static int omfs_grow_extent(struct inode *inode, struct omfs_extent *oe,
			u64 *ret_block)
{
	struct omfs_extent_entry *terminator;
	struct omfs_extent_entry *entry = &oe->e_entry;
	struct omfs_sb_info *sbi = OMFS_SB(inode->i_sb);
	u32 extent_count = be32_to_cpu(oe->e_extent_count);
	u64 new_block = 0;
	u32 max_count;
	int new_count;
	int ret = 0;

	/* reached the end of the extent table with no blocks mapped.
	 * there are three possibilities for adding: grow last extent,
	 * add a new extent to the current extent table, and add a
	 * continuation inode.  in last two cases need an allocator for
	 * sbi->s_cluster_size
	 */

	/* TODO: handle holes */

	/* should always have a terminator */
	if (extent_count < 1)
		return -EIO;

	/* trivially grow current extent, if next block is not taken */
	terminator = entry + extent_count - 1;
	if (extent_count > 1) {
		entry = terminator-1;
		new_block = be64_to_cpu(entry->e_cluster) +
			be64_to_cpu(entry->e_blocks);

		if (omfs_allocate_block(inode->i_sb, new_block)) {
			entry->e_blocks =
				cpu_to_be64(be64_to_cpu(entry->e_blocks) + 1);
			terminator->e_blocks = ~(cpu_to_be64(
				be64_to_cpu(~terminator->e_blocks) + 1));
			goto out;
		}
	}
	max_count = omfs_max_extents(sbi, OMFS_EXTENT_START);

	/* TODO: add a continuation block here */
	if (be32_to_cpu(oe->e_extent_count) > max_count-1)
		return -EIO;

	/* try to allocate a new cluster */
	ret = omfs_allocate_range(inode->i_sb, 1, sbi->s_clustersize,
		&new_block, &new_count);
	if (ret)
		goto out_fail;

	/* copy terminator down an entry */
	entry = terminator;
	terminator++;
	memcpy(terminator, entry, sizeof(struct omfs_extent_entry));

	entry->e_cluster = cpu_to_be64(new_block);
	entry->e_blocks = cpu_to_be64((u64) new_count);

	terminator->e_blocks = ~(cpu_to_be64(
		be64_to_cpu(~terminator->e_blocks) + (u64) new_count));

	/* write in new entry */
	oe->e_extent_count = cpu_to_be32(1 + be32_to_cpu(oe->e_extent_count));

out:
	*ret_block = new_block;
out_fail:
	return ret;
}
Example #16
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr)
{
	struct mlx4_ib_srq *srq = to_msrq(ibsrq);
	struct mlx4_wqe_srq_next_seg *next;
	struct mlx4_wqe_data_seg *scat;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);

	spin_lock_irqsave(&srq->lock, flags);
	if (mdev->dev->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		err = -EIO;
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		srq->wrid[srq->head] = wr->wr_id;

		next      = get_wqe(srq, srq->head);
		srq->head = be16_to_cpu(next->next_wqe_index);
		scat      = (struct mlx4_wqe_data_seg *) (next + 1);

		for (i = 0; i < wr->num_sge; ++i) {
			scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
			scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
			scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
		}

		if (i < srq->msrq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
			scat[i].addr       = 0;
		}
	}

	if (likely(nreq)) {
		srq->wqe_ctr += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*srq->db.db = cpu_to_be32(srq->wqe_ctr);
	}
out:

	spin_unlock_irqrestore(&srq->lock, flags);

	return err;
}
Example #17
static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
{
	return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
		(sste->esid_data == cpu_to_be64(slb->esid)));
}
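The match is computed on the big-endian values stored in the segment table entry, so only the two CPU-endian SLB fields need converting. An equivalent check in CPU byte order would look like this (a sketch, assuming the same field types):

static bool sste_matches_cpu_order(struct cxl_sste *sste, struct copro_slb *slb)
{
	return be64_to_cpu(sste->vsid_data) == slb->vsid &&
	       be64_to_cpu(sste->esid_data) == slb->esid;
}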
Example #18
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }

}
Example #19
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (user) {
		if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
			goto err5;
	} else
		if (alloc_host_sq(rdev, &wq->sq))
			goto err5;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue)
		goto err6;
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err7;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err7;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}
Example #20
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
Example #21
/** @file
 *
 * SHA-512/256 algorithm
 *
 */

#include <stdint.h>
#include <byteswap.h>
#include <ipxe/crypto.h>
#include <ipxe/asn1.h>
#include <ipxe/sha512.h>

/** SHA-512/256 initial digest values */
static const struct sha512_digest sha512_256_init_digest = {
	.h = {
		cpu_to_be64 ( 0x22312194fc2bf72cULL ),
		cpu_to_be64 ( 0x9f555fa3c84c64c2ULL ),
		cpu_to_be64 ( 0x2393b86b6f53b151ULL ),
		cpu_to_be64 ( 0x963877195940eabdULL ),
		cpu_to_be64 ( 0x96283ee2a88effe3ULL ),
		cpu_to_be64 ( 0xbe5e1e2553863992ULL ),
		cpu_to_be64 ( 0x2b0199fc2c85b8aaULL ),
		cpu_to_be64 ( 0x0eb72ddc81c52ca2ULL ),
	},
};

/**
 * Initialise SHA-512/256 algorithm
 *
 * @v ctx		SHA-512/256 context
 */
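/*
 * (The snippet appears truncated here.)  A minimal sketch of the missing
 * init routine, assuming iPXE's sha512_family_init() helper and the
 * SHA512_256_DIGEST_SIZE constant from <ipxe/sha512.h>; the upstream
 * implementation may differ.
 */
static void sha512_256_init ( void *ctx ) {
	struct sha512_context *context = ctx;

	/* Seed the generic SHA-512 engine with the SHA-512/256 initial
	 * digest values and record the truncated (256-bit) output length.
	 */
	sha512_family_init ( context, &sha512_256_init_digest,
			     SHA512_256_DIGEST_SIZE );
}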
Example #22
static int sun4u_NVRAM_set_params (m48t59_t *nvram, uint16_t NVRAM_size,
                                   const char *arch,
                                   ram_addr_t RAM_size,
                                   const char *boot_devices,
                                   uint32_t kernel_image, uint32_t kernel_size,
                                   const char *cmdline,
                                   uint32_t initrd_image, uint32_t initrd_size,
                                   uint32_t NVRAM_image,
                                   int width, int height, int depth,
                                   const uint8_t *macaddr)
{
    unsigned int i;
    uint32_t start, end;
    uint8_t image[0x1ff0];
    ohwcfg_v3_t *header = (ohwcfg_v3_t *)&image;
    struct sparc_arch_cfg *sparc_header;
    struct OpenBIOS_nvpart_v1 *part_header;

    memset(image, '\0', sizeof(image));

    // Try to match PPC NVRAM
    pstrcpy((char *)header->struct_ident, sizeof(header->struct_ident),
            "QEMU_BIOS");
    header->struct_version = cpu_to_be32(3); /* structure v3 */

    header->nvram_size = cpu_to_be16(NVRAM_size);
    header->nvram_arch_ptr = cpu_to_be16(sizeof(ohwcfg_v3_t));
    header->nvram_arch_size = cpu_to_be16(sizeof(struct sparc_arch_cfg));
    pstrcpy((char *)header->arch, sizeof(header->arch), arch);
    header->nb_cpus = smp_cpus & 0xff;
    header->RAM0_base = 0;
    header->RAM0_size = cpu_to_be64((uint64_t)RAM_size);
    pstrcpy((char *)header->boot_devices, sizeof(header->boot_devices),
            boot_devices);
    header->nboot_devices = strlen(boot_devices) & 0xff;
    header->kernel_image = cpu_to_be64((uint64_t)kernel_image);
    header->kernel_size = cpu_to_be64((uint64_t)kernel_size);
    if (cmdline) {
        pstrcpy_targphys(CMDLINE_ADDR, TARGET_PAGE_SIZE, cmdline);
        header->cmdline = cpu_to_be64((uint64_t)CMDLINE_ADDR);
        header->cmdline_size = cpu_to_be64((uint64_t)strlen(cmdline));
    }
    header->initrd_image = cpu_to_be64((uint64_t)initrd_image);
    header->initrd_size = cpu_to_be64((uint64_t)initrd_size);
    header->NVRAM_image = cpu_to_be64((uint64_t)NVRAM_image);

    header->width = cpu_to_be16(width);
    header->height = cpu_to_be16(height);
    header->depth = cpu_to_be16(depth);
    if (nographic)
        header->graphic_flags = cpu_to_be16(OHW_GF_NOGRAPHICS);

    header->crc = cpu_to_be16(OHW_compute_crc(header, 0x00, 0xF8));

    // Architecture specific header
    start = sizeof(ohwcfg_v3_t);
    sparc_header = (struct sparc_arch_cfg *)&image[start];
    sparc_header->valid = 0;
    start += sizeof(struct sparc_arch_cfg);

    // OpenBIOS nvram variables
    // Variable partition
    part_header = (struct OpenBIOS_nvpart_v1 *)&image[start];
    part_header->signature = OPENBIOS_PART_SYSTEM;
    pstrcpy(part_header->name, sizeof(part_header->name), "system");

    end = start + sizeof(struct OpenBIOS_nvpart_v1);
    for (i = 0; i < nb_prom_envs; i++)
        end = OpenBIOS_set_var(image, end, prom_envs[i]);

    // End marker
    image[end++] = '\0';

    end = start + ((end - start + 15) & ~15);
    OpenBIOS_finish_partition(part_header, end - start);

    // free partition
    start = end;
    part_header = (struct OpenBIOS_nvpart_v1 *)&image[start];
    part_header->signature = OPENBIOS_PART_FREE;
    pstrcpy(part_header->name, sizeof(part_header->name), "free");

    end = 0x1fd0;
    OpenBIOS_finish_partition(part_header, end - start);

    Sun_init_header((struct Sun_nvram *)&image[0x1fd8], macaddr, 0x80);

    for (i = 0; i < sizeof(image); i++)
        m48t59_write(nvram, i, image[i]);

    qemu_register_boot_set(nvram_boot_set, nvram);

    return 0;
}
Example #23
static void s390x_write_elf64_timer(Note *note, S390CPU *cpu)
{
    note->hdr.n_type = cpu_to_be32(NT_S390_TIMER);
    note->contents.timer = cpu_to_be64((uint64_t)(cpu->env.cputm));
}
Example #24
static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv,
				  struct skb_shared_info *shinfo,
				  struct mlx4_wqe_data_seg *data,
				  struct sk_buff *skb,
				  int lso_header_size,
				  __be32 mr_key,
				  struct mlx4_en_tx_info *tx_info)
{
	struct device *ddev = priv->ddev;
	dma_addr_t dma = 0;
	u32 byte_count = 0;
	int i_frag;

	/* Map fragments if any */
	for (i_frag = shinfo->nr_frags - 1; i_frag >= 0; i_frag--) {
		const struct skb_frag_struct *frag;

		frag = &shinfo->frags[i_frag];
		byte_count = skb_frag_size(frag);
		dma = skb_frag_dma_map(ddev, frag,
				       0, byte_count,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(ddev, dma))
			goto tx_drop_unmap;

		data->addr = cpu_to_be64(dma);
		data->lkey = mr_key;
		dma_wmb();
		data->byte_count = cpu_to_be32(byte_count);
		--data;
	}

	/* Map linear part if needed */
	if (tx_info->linear) {
		byte_count = skb_headlen(skb) - lso_header_size;

		dma = dma_map_single(ddev, skb->data +
				     lso_header_size, byte_count,
				     PCI_DMA_TODEVICE);
		if (dma_mapping_error(ddev, dma))
			goto tx_drop_unmap;

		data->addr = cpu_to_be64(dma);
		data->lkey = mr_key;
		dma_wmb();
		data->byte_count = cpu_to_be32(byte_count);
	}
	/* tx completion can avoid cache line miss for common cases */
	tx_info->map0_dma = dma;
	tx_info->map0_byte_count = byte_count;

	return true;

tx_drop_unmap:
	en_err(priv, "DMA mapping error\n");

	while (++i_frag < shinfo->nr_frags) {
		++data;
		dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr),
			       be32_to_cpu(data->byte_count),
			       PCI_DMA_TODEVICE);
	}

	return false;
}
Example #25
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.__pad0 = 0;
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_header.__pad1 = 0;
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
	str->di_uid = cpu_to_be32(ip->i_inode.i_uid);
	str->di_gid = cpu_to_be32(ip->i_inode.i_gid);
	str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
	str->di_size = cpu_to_be64(ip->i_disksize);
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}
Example #26
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
		  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
		  unsigned vector, int collapsed)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = (vector == MLX4_LEAST_ATTACHED_VECTOR) ?
		mlx4_find_least_loaded_vector(priv) : vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock_irq(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock_irq(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	memset(cq_context, 0, sizeof *cq_context);

	cq_context->flags	    = cpu_to_be32(!!collapsed << 18);
	cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
	cq_context->comp_eqn	    = priv->eq_table.eq[vector].eqn;
	cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->pd = cpu_to_be32(dev->caps.pd_base << dev->caps.slave_pd_shift);
	cq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	priv->eq_table.eq[cq->vector].load++;
	cq->cons_index = 0;
	cq->arm_sn     = 1;
	cq->uar        = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	return 0;

err_radix:
	spin_lock_irq(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock_irq(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
Example #27
/*
 * siw_qp_prepare_tx()
 *
 * Prepare tx state for sending out one fpdu. Builds complete pkt
 * if no user data or only immediate data are present.
 *
 * returns PKT_COMPLETE if complete pkt built, PKT_FRAGMENTED otherwise.
 */
static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
{
	struct siw_wqe		*wqe = c_tx->wqe;
	u32			*crc = NULL;

	dprint(DBG_TX, "(QP%d):\n", TX_QPID(c_tx));

	switch (wr_type(wqe)) {

	case SIW_WR_RDMA_READ_REQ:
		memcpy(&c_tx->pkt.ctrl,
		       &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
		       sizeof(struct iwarp_ctrl));

		c_tx->pkt.rreq.rsvd = 0;
		c_tx->pkt.rreq.ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
		c_tx->pkt.rreq.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]);
		c_tx->pkt.rreq.ddp_mo = 0;
		c_tx->pkt.rreq.sink_stag = htonl(wqe->wr.rread.sge[0].lkey);
		c_tx->pkt.rreq.sink_to =
			cpu_to_be64(wqe->wr.rread.sge[0].addr); /* abs addr! */
		c_tx->pkt.rreq.source_stag = htonl(wqe->wr.rread.rtag);
		c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->wr.rread.raddr);
		c_tx->pkt.rreq.read_size = htonl(wqe->bytes);

		dprint(DBG_TX, ": RREQ: Sink: %x, 0x%016llx\n",
			wqe->wr.rread.sge[0].lkey, wqe->wr.rread.sge[0].addr);

		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rreq);
		crc = &c_tx->pkt.rreq_pkt.crc;
		break;

	case SIW_WR_SEND:
		if (wr_flags(wqe) & IB_SEND_SOLICITED)
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_SE].ctrl,
			       sizeof(struct iwarp_ctrl));
		else
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND].ctrl,
			       sizeof(struct iwarp_ctrl));

		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
		c_tx->pkt.send.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
		c_tx->pkt.send.ddp_mo = 0;
		c_tx->pkt.send.rsvd = 0;

		c_tx->ctrl_len = sizeof(struct iwarp_send);

		if (!wqe->bytes)
			crc = &c_tx->pkt.send_pkt.crc;
		break;

	case SIW_WR_RDMA_WRITE:
		memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_WRITE].ctrl,
		       sizeof(struct iwarp_ctrl));

		c_tx->pkt.rwrite.sink_stag = htonl(wqe->wr.write.rtag);
		c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->wr.write.raddr);
		c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);

		if (!wqe->bytes)
			crc = &c_tx->pkt.write_pkt.crc;
		break;

	case SIW_WR_RDMA_READ_RESP:
		memcpy(&c_tx->pkt.ctrl,
		       &iwarp_pktinfo[RDMAP_RDMA_READ_RESP].ctrl,
		       sizeof(struct iwarp_ctrl));

		/* NBO */
		c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->wr.rresp.rtag);
		c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->wr.rresp.raddr);

		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);

		dprint(DBG_TX, ": RRESP: Sink: %x, 0x%016llx\n",
			wqe->wr.rresp.rtag, wqe->wr.rresp.raddr);

		if (!wqe->bytes)
			crc = &c_tx->pkt.rresp_pkt.crc;
		break;

	default:
		dprint(DBG_ON, "Unsupported WQE type %d\n", wr_type(wqe));
		BUG();
		break;
	}
	c_tx->ctrl_sent = 0;
	c_tx->sge_idx = 0;
	c_tx->sge_off = 0;
	c_tx->pg_idx = 0;
	c_tx->umem_chunk = NULL;

	/*
	 * Do complete CRC if enabled and short packet
	 */
	if (crc) {
		*crc = 0;
		if (c_tx->crc_enabled) {
			if (siw_crc_txhdr(c_tx) != 0)
				return -EINVAL;
			crypto_hash_final(&c_tx->mpa_crc_hd, (u8 *)crc);
		}
	}
	c_tx->ctrl_len += MPA_CRC_SIZE;

	/*
	 * Allow direct sending out of user buffer if WR is non signalled
	 * and payload is over threshold and no CRC is enabled.
	 * Per RDMA verbs, the application should not change the send buffer
	 * until the work has completed. In iWARP, work completion is only
	 * local delivery to TCP. TCP may reuse the buffer for
	 * retransmission. Changing unsent data also breaks the CRC,
	 * if applied.
	 */
	if (zcopy_tx
	    && !SIW_INLINED_DATA(wqe)
	    && !(wr_flags(wqe) & IB_SEND_SIGNALED)
	    && wqe->bytes > SENDPAGE_THRESH
	    && wr_type(wqe) != SIW_WR_RDMA_READ_REQ)
		c_tx->use_sendpage = 1;
	else
		c_tx->use_sendpage = 0;

	return crc == NULL ? PKT_FRAGMENTED : PKT_COMPLETE;
}
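/*
 * Hypothetical caller sketch (not part of the original driver): per the
 * comment above siw_qp_prepare_tx(), its return value selects the send
 * path.  siw_tx_ctrl() and siw_tx_hdt() stand in for the driver's
 * header-only and header-plus-data transmit helpers; the names and
 * signatures used here are assumptions.
 */
static int siw_qp_tx_one(struct siw_iwarp_tx *c_tx, struct socket *s)
{
	int rv = siw_qp_prepare_tx(c_tx);

	if (rv == PKT_COMPLETE)
		/* Header (plus trailer CRC) already forms the whole FPDU */
		return siw_tx_ctrl(c_tx, s, MSG_DONTWAIT);

	if (rv == PKT_FRAGMENTED)
		/* Send the header first, then payload from SGEs or pages */
		return siw_tx_hdt(c_tx, s);

	return rv;	/* e.g. -EINVAL from a failed CRC setup above */
}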
Example #28
int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                          struct ib_recv_wr **bad_wr)
{
    struct mlx4_ib_srq *srq = to_msrq(ibsrq);
    struct mlx4_wqe_srq_next_seg *next;
    struct mlx4_wqe_data_seg *scat;
    unsigned long flags;
    int err = 0;
    int nreq;
    int i;

    spin_lock_irqsave(&srq->lock, flags);

    for (nreq = 0; wr; ++nreq, wr = wr->next) {
        if (unlikely(wr->num_sge > srq->msrq.max_gs)) {
            mlx4_ib_dbg("srq num 0x%x: num s/g entries too large (%d)",
                        srq->msrq.srqn, wr->num_sge);
            err = -EINVAL;
            *bad_wr = wr;
            break;
        }

        if (unlikely(srq->head == srq->tail)) {
            mlx4_ib_dbg("srq num 0x%x: No entries available to post.",
                        srq->msrq.srqn);
            err = -ENOMEM;
            *bad_wr = wr;
            break;
        }

        srq->wrid[srq->head] = wr->wr_id;

        next      = get_wqe(srq, srq->head);
        srq->head = be16_to_cpu(next->next_wqe_index);
        scat      = (struct mlx4_wqe_data_seg *) (next + 1);

        for (i = 0; i < wr->num_sge; ++i) {
            scat[i].byte_count = cpu_to_be32(wr->sg_list[i].length);
            scat[i].lkey       = cpu_to_be32(wr->sg_list[i].lkey);
            scat[i].addr       = cpu_to_be64(wr->sg_list[i].addr);
        }

        if (i < srq->msrq.max_gs) {
            scat[i].byte_count = 0;
            scat[i].lkey       = cpu_to_be32(MLX4_INVALID_LKEY);
            scat[i].addr       = 0;
        }
    }

    if (likely(nreq)) {
        srq->wqe_ctr += nreq;

        /*
         * Make sure that descriptors are written before
         * doorbell record.
         */
        wmb();

        *srq->db.db = cpu_to_be32(srq->wqe_ctr);
    }

    spin_unlock_irqrestore(&srq->lock, flags);

    return err;
}
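/*
 * Generic sketch (hypothetical 'struct example_ring', not mlx4-specific)
 * of the descriptor/doorbell ordering used above: all descriptor stores
 * must be ordered before the doorbell-record update, hence the wmb().
 */
struct example_ring {
	__be32 *db_rec;		/* doorbell record read by the HW */
};

static void example_ring_doorbell(struct example_ring *ring, u32 producer)
{
	/* Descriptors have already been written by the caller. */
	wmb();			/* order descriptor writes before doorbell */
	*ring->db_rec = cpu_to_be32(producer);
}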
Example #29
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfnl_log_net *log,
			struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;
	const unsigned char *hwhdrp;

	nlh = nlmsg_put(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -1;
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	memset(&pmsg, 0, sizeof(pmsg));
	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we need
			 * to look for the bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physindev;

			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;

			physindev = nf_bridge_get_physindev(skb);
			if (physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			struct net_device *physoutdev;

			/* Case 2: outdev is a bridge group, we need to look
			 * for the physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;

			physoutdev = nf_bridge_get_physoutdev(skb);
			if (physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)
			hwhdrp -= ETH_HLEN;

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk_fullsock(sk)) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&log->global_seq))))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len))
			goto nla_put_failure;

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
Example #30
int c2_alloc_qp(struct c2_dev *c2dev,
		struct c2_pd *pd,
		struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
	struct c2wr_qp_create_req wr;
	struct c2wr_qp_create_rep *reply;
	struct c2_vq_req *vq_req;
	struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
	struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
	unsigned long peer_pa;
	u32 q_size, msg_size, mmap_size;
	void __iomem *mmap;
	int err;

	err = c2_alloc_qpn(c2dev, qp);
	if (err)
		return err;
	qp->ibqp.qp_num = qp->qpn;
	qp->ibqp.qp_type = IB_QPT_RC;

	/* Allocate the SQ and RQ shared pointers */
	qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->sq_mq.shared_dma, GFP_KERNEL);
	if (!qp->sq_mq.shared) {
		err = -ENOMEM;
		goto bail0;
	}

	qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->rq_mq.shared_dma, GFP_KERNEL);
	if (!qp->rq_mq.shared) {
		err = -ENOMEM;
		goto bail1;
	}

	/* Allocate the verbs request */
	vq_req = vq_req_alloc(c2dev);
	if (vq_req == NULL) {
		err = -ENOMEM;
		goto bail2;
	}

	/* Initialize the work request */
	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_QP_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.sq_cq_handle = send_cq->adapter_handle;
	wr.rq_cq_handle = recv_cq->adapter_handle;
	wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
	wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
	wr.srq_handle = 0;
	wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
			       QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
	wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
	wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
	wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
	wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
	wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
	wr.pd_id = pd->pd_id;
	wr.user_context = (unsigned long) qp;

	vq_req_get(c2dev, vq_req);

	/* Send the WR to the adapter */
	err = vq_send_wr(c2dev, (union c2wr *) & wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail3;
	}

	/* Wait for the verb reply  */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err) {
		goto bail3;
	}

	/* Process the reply */
	reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail3;
	}

	if ((err = c2_wr_get_result(reply)) != 0) {
		goto bail4;
	}

	/* Fill in the kernel QP struct */
	atomic_set(&qp->refcount, 1);
	qp->adapter_handle = reply->qp_handle;
	qp->state = IB_QPS_RESET;
	qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;

	/* Initialize the SQ MQ */
	q_size = be32_to_cpu(reply->sq_depth);
	msg_size = be32_to_cpu(reply->sq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail5;
	}

	c2_mq_req_init(&qp->sq_mq,
		       be32_to_cpu(reply->sq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,				/* peer */
		       C2_MQ_ADAPTER_TARGET);

	/* Initialize the RQ mq */
	q_size = be32_to_cpu(reply->rq_depth);
	msg_size = be32_to_cpu(reply->rq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail6;
	}

	c2_mq_req_init(&qp->rq_mq,
		       be32_to_cpu(reply->rq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,				/* peer */
		       C2_MQ_ADAPTER_TARGET);

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	return 0;

      bail6:
	iounmap(qp->sq_mq.peer);
      bail5:
	destroy_qp(c2dev, qp);
      bail4:
	vq_repbuf_free(c2dev, reply);
      bail3:
	vq_req_free(c2dev, vq_req);
      bail2:
	c2_free_mqsp(qp->rq_mq.shared);
      bail1:
	c2_free_mqsp(qp->sq_mq.shared);
      bail0:
	c2_free_qpn(c2dev, qp->qpn);
	return err;
}