Beispiel #1
0
int psmi_sysbuf_init(void)
{
	/*
	 * One-time initialization of the system-buffer allocator: set up
	 * MM_NUM_OF_POOLS size-classed pools and prime each fixed-size
	 * pool with one alloc/free so a first real allocation is cheap.
	 *
	 * Returns PSM_OK (idempotent: subsequent calls short-circuit).
	 */
	int i;
	uint32_t block_sizes[] = { 256, 512, 1024,
		2048, 4096, 8192, (uint32_t) -1 };
	uint32_t replenishing_rate[] = { 128, 64, 32, 16, 8, 4, 0 };

	if (psmi_sysbuf.is_initialized)
		return PSM_OK;

	for (i = 0; i < MM_NUM_OF_POOLS; i++) {
		psmi_sysbuf.handler_index[i].block_size = block_sizes[i];
		psmi_sysbuf.handler_index[i].current_available = 0;
		psmi_sysbuf.handler_index[i].free_list = NULL;
		psmi_sysbuf.handler_index[i].total_alloc = 0;
		psmi_sysbuf.handler_index[i].replenishing_rate =
			replenishing_rate[i];

		/* (uint32_t)-1 marks the catch-all "any size" pool; its
		 * buffers are transient (freed back to the OS, never
		 * cached), hence a zero replenishing rate. Explicit cast
		 * avoids relying on implicit -1 -> UINT32_MAX conversion. */
		if (block_sizes[i] == (uint32_t) -1) {
			psmi_assert_always(replenishing_rate[i] == 0);
			psmi_sysbuf.handler_index[i].flags =
				MM_FLAG_TRANSIENT;
		} else {
			psmi_assert_always(replenishing_rate[i] > 0);
			psmi_sysbuf.handler_index[i].flags = MM_FLAG_NONE;
		}
	}

	VALGRIND_CREATE_MEMPOOL(&psmi_sysbuf, PSM_VALGRIND_REDZONE_SZ,
				PSM_VALGRIND_MEM_UNDEFINED);

	/* Hit once on each block size so we have a pool that's allocated.
	 * Best-effort: if an allocation fails here, skip the free rather
	 * than passing NULL to psmi_sysbuf_free(). */
	for (i = 0; i < MM_NUM_OF_POOLS; i++) {
		void *ptr;
		if (block_sizes[i] == (uint32_t) -1)
			continue;
		ptr = psmi_sysbuf_alloc(block_sizes[i]);
		if (ptr != NULL)
			psmi_sysbuf_free(ptr);
	}

	/* BUG FIX: mark initialization complete; the original never set
	 * this flag, so the is_initialized guard above could never fire
	 * and every call re-ran the full (state-clobbering) init. */
	psmi_sysbuf.is_initialized = 1;

	return PSM_OK;
}
Beispiel #2
0
int ips_proto_am(struct ips_recvhdrq_event *rcv_ev)
{
	struct ips_message_header *p_hdr = rcv_ev->p_hdr;
	struct ips_epaddr *ipsaddr = rcv_ev->ipsaddr;
	struct ips_proto_am *proto_am = &rcv_ev->proto->proto_am;
	ips_epaddr_flow_t flowid = ips_proto_flowid(p_hdr);
	struct ips_flow *flow;
	struct ips_am_message *msg = NULL;
	int ret = IPS_RECVHDRQ_CONTINUE;
	enum ips_msg_order msgorder;

	psmi_assert(flowid < EP_FLOW_LAST);
	flow = &ipsaddr->flows[flowid];
	/*
	 * Based on AM request/reply traffic pattern, if we don't have a reply
	 * scb slot then we can't process the request packet, we just silently
	 * drop it.  Otherwise, it will be a deadlock.  note:
	 * ips_proto_is_expected_or_nak() can not be called in this case.
	 */
	if (_get_proto_hfi_opcode(p_hdr) == OPCODE_AM_REQUEST &&
	    !ips_scbctrl_avail(&proto_am->scbc_reply))
		return IPS_RECVHDRQ_CONTINUE;

	if (!ips_proto_is_expected_or_nak(rcv_ev))
		return IPS_RECVHDRQ_CONTINUE;

	uint16_t send_msgseq =
	    __le32_to_cpu(p_hdr->khdr.kdeth0) & HFI_KHDR_MSGSEQ_MASK;
	msgorder = ips_proto_check_msg_order(ipsaddr, flow, send_msgseq,
			&ipsaddr->msgctl->am_recv_seqnum);

	if (msgorder == IPS_MSG_ORDER_FUTURE)
		return IPS_RECVHDRQ_REVISIT;
	else if (msgorder == IPS_MSG_ORDER_FUTURE_RECV) {
		uint64_t *msg_payload;
		uint64_t *payload = ips_recvhdrq_event_payload(rcv_ev);
		uint32_t paylen = ips_recvhdrq_event_paylen(rcv_ev);

		psmi_assert(paylen == 0 || payload);
		msg = psmi_mpool_get(ips_am_msg_pool);
		msg_payload = psmi_sysbuf_alloc(
				ips_recvhdrq_event_paylen(rcv_ev));
		if (unlikely(msg == NULL || msg_payload == NULL)) {
			/* Out of memory, drop the packet. */
			printf("%d OOM dropping %d\n", getpid(), send_msgseq);
			flow->recv_seq_num.psn_num =
				(flow->recv_seq_num.psn_num - 1) &
				rcv_ev->proto->psn_mask;
			return IPS_RECVHDRQ_BREAK;
		}

		memcpy(&msg->p_hdr, p_hdr, sizeof(struct ips_message_header));
		memcpy(msg_payload, payload, paylen);

		msg->payload = msg_payload;
		msg->ipsaddr = ipsaddr;
		msg->proto_am = proto_am;
		msg->paylen = paylen;
		msg->seqnum =
			__le32_to_cpu(p_hdr->khdr.kdeth0) &
			HFI_KHDR_MSGSEQ_MASK;

		ips_proto_am_queue_msg(msg);
	} else if ((msgorder == IPS_MSG_ORDER_EXPECTED) ||
		   (msgorder == IPS_MSG_ORDER_EXPECTED_MATCH)) {
		uint64_t *payload = ips_recvhdrq_event_payload(rcv_ev);
		uint32_t paylen = ips_recvhdrq_event_paylen(rcv_ev);

		psmi_assert(paylen == 0 || payload);
		if (ips_am_run_handler(p_hdr, ipsaddr, proto_am,
					payload, paylen))
			ret = IPS_RECVHDRQ_BREAK;

		ips_proto_am_handle_outoforder_queue();
	}

	/* Look if the handler replied, if it didn't, ack the request */
	if ((__be32_to_cpu(p_hdr->bth[2]) & IPS_SEND_FLAG_ACKREQ) ||
	    (flow->flags & IPS_FLOW_FLAG_GEN_BECN))
		ips_proto_send_ack((struct ips_recvhdrq *)rcv_ev->recvq, flow);

	ips_proto_process_ack(rcv_ev);
	return ret;
}