Example #1
OM_uint32
gss_verify_mic(OM_uint32 *minor_status,
               const gss_ctx_id_t ctx,
               const gss_buffer_t message_buffer,
               const gss_buffer_t token_buffer,
               gss_qop_t *qop_state)
{
    OM_uint32 maj_stat;
    struct mbuf *m, *mic;

    if (!ctx) {
        *minor_status = 0;
        return (GSS_S_NO_CONTEXT);
    }

    MGET(m, M_WAITOK, MT_DATA);
    if (message_buffer->length > MLEN)
        MCLGET(m, M_WAITOK);
    m_append(m, message_buffer->length, message_buffer->value);

    MGET(mic, M_WAITOK, MT_DATA);
    if (token_buffer->length > MLEN)
        MCLGET(mic, M_WAITOK);
    m_append(mic, token_buffer->length, token_buffer->value);

    maj_stat = KGSS_VERIFY_MIC(ctx, minor_status, m, mic, qop_state);

    m_freem(m);
    m_freem(mic);

    return (maj_stat);
}
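
Examples #1, #2, and #4 all stage a flat gss_buffer_t into an mbuf chain with the same MGET/MCLGET/m_append sequence before calling into the kernel GSSAPI. A minimal sketch of that shared pattern factored into a standalone helper (the name gss_buffer_to_mbuf is hypothetical, not part of the kernel API):

/*
 * Hypothetical helper: copy a flat GSS-API buffer into a freshly
 * allocated mbuf chain.  M_WAITOK allocations sleep rather than fail.
 */
static struct mbuf *
gss_buffer_to_mbuf(const gss_buffer_t buf)
{
	struct mbuf *m;

	MGET(m, M_WAITOK, MT_DATA);
	if (buf->length > MLEN)
		MCLGET(m, M_WAITOK);	/* payload won't fit; attach a cluster */
	m_append(m, buf->length, buf->value);
	return (m);
}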
Example #2
OM_uint32
gss_get_mic(OM_uint32 *minor_status,
    const gss_ctx_id_t ctx,
    gss_qop_t qop_req,
    const gss_buffer_t message_buffer,
    gss_buffer_t message_token)
{
	OM_uint32 maj_stat;
	struct mbuf *m, *mic;

	if (!ctx) {
		*minor_status = 0;
		return (GSS_S_NO_CONTEXT);
	}

	MGET(m, M_WAITOK, MT_DATA);
	if (message_buffer->length > MLEN)
		MCLGET(m, M_WAITOK);
	m_append(m, message_buffer->length, message_buffer->value);

	maj_stat = KGSS_GET_MIC(ctx, minor_status, qop_req, m, &mic);

	m_freem(m);
	if (maj_stat == GSS_S_COMPLETE) {
		message_token->length = m_length(mic, NULL);
		message_token->value = malloc(message_token->length,
		    M_GSSAPI, M_WAITOK);
		m_copydata(mic, 0, message_token->length,
		    message_token->value);
		m_freem(mic);
	}

	return (maj_stat);
}
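
The success path above flattens the resulting mic chain back into a malloc'd flat buffer; example #4 does the same for the wrapped message. A sketch of that reverse direction, using only calls already present in these examples (the helper name is made up; M_GSSAPI is the malloc type the examples use):

/*
 * Hypothetical helper: flatten an mbuf chain into a flat GSS-API
 * buffer and release the chain.
 */
static void
mbuf_to_gss_buffer(struct mbuf *m, gss_buffer_t buf)
{
	buf->length = m_length(m, NULL);	/* total bytes across the chain */
	buf->value = malloc(buf->length, M_GSSAPI, M_WAITOK);
	m_copydata(m, 0, buf->length, buf->value);
	m_freem(m);
}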
Example #3
/*
 * Convert to mbufs from vbox scatter-gather data structure
 */
static struct mbuf * vboxNetFltFreeBSDSGMBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG)
{
    struct mbuf *m;
    int error;
    unsigned int i;

    if (pSG->cbTotal == 0)
        return (NULL);

    m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
    if (m == NULL)
        return (NULL);

    m->m_pkthdr.len = m->m_len = 0;
    m->m_pkthdr.rcvif = NULL;

    for (i = 0; i < pSG->cSegsUsed; i++)
    {
        error = m_append(m, pSG->aSegs[i].cb, pSG->aSegs[i].pv);
        if (error == 0)
        {
            m_freem(m);
            return (NULL);
        }
    }
    return (m);
}
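
Note that m_append(9) returns 1 on success and 0 on failure, so despite the variable's name, error == 0 is the failure case above. The loop body is equivalent to:

        /* drop the whole chain if any segment fails to append */
        if (!m_append(m, pSG->aSegs[i].cb, pSG->aSegs[i].pv)) {
            m_freem(m);
            return (NULL);
        }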
Example #4
OM_uint32
gss_wrap(OM_uint32 *minor_status,
    const gss_ctx_id_t ctx,
    int conf_req_flag,
    gss_qop_t qop_req,
    const gss_buffer_t input_message_buffer,
    int *conf_state,
    gss_buffer_t output_message_buffer)
{
	OM_uint32 maj_stat;
	struct mbuf *m;

	if (!ctx) {
		*minor_status = 0;
		return (GSS_S_NO_CONTEXT);
	}

	MGET(m, M_WAITOK, MT_DATA);
	if (input_message_buffer->length > MLEN)
		MCLGET(m, M_WAITOK);
	m_append(m, input_message_buffer->length, input_message_buffer->value);

	maj_stat = KGSS_WRAP(ctx, minor_status, conf_req_flag, qop_req,
	    &m, conf_state);

	/*
	 * On success, m is the wrapped message, on failure, m is
	 * freed.
	 */
	if (maj_stat == GSS_S_COMPLETE) {
		output_message_buffer->length = m_length(m, NULL);
		output_message_buffer->value =
			malloc(output_message_buffer->length,
			    M_GSSAPI, M_WAITOK);
		m_copydata(m, 0, output_message_buffer->length,
		    output_message_buffer->value);
		m_freem(m);
	}

	return (maj_stat);
}
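
Contrasting examples #2 and #4: KGSS_GET_MIC leaves the input chain to the caller and returns a separate mic chain through its last argument, while KGSS_WRAP takes &m because wrapping replaces the message chain in place (and, per the comment above, frees it on failure). The two calls as they appear in these examples:

	maj_stat = KGSS_GET_MIC(ctx, minor_status, qop_req, m, &mic);
	maj_stat = KGSS_WRAP(ctx, minor_status, conf_req_flag, qop_req,
	    &m, conf_state);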
Example #5
static void
cdce_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct mbuf *m;
	struct mbuf *mt;
	uint32_t crc;
	uint8_t x;
	int actlen, aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTFN(1, "\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
		    actlen, aframes);

		ifp->if_opackets++;

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		for (x = 0; x != CDCE_FRAMES_MAX; x++) {

			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

			if (m == NULL)
				break;

			if (sc->sc_flags & CDCE_FLAG_ZAURUS) {
				/*
				 * Zaurus wants a 32-bit CRC appended
				 * to every frame
				 */

				crc = cdce_m_crc32(m, 0, m->m_pkthdr.len);
				crc = htole32(crc);

				if (!m_append(m, 4, (void *)&crc)) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
			}
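			/*
			 * usbd_xfer_set_frame_data() below is handed a single
			 * contiguous (m_data, m_len) buffer, so a multi-mbuf
			 * chain (m_len != m_pkthdr.len) must be collapsed
			 * with m_defrag() first.
			 */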
			if (m->m_len != m->m_pkthdr.len) {
				mt = m_defrag(m, M_DONTWAIT);
				if (mt == NULL) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
				m = mt;
			}
			if (m->m_pkthdr.len > MCLBYTES) {
				m->m_pkthdr.len = MCLBYTES;
			}
			sc->sc_tx_buf[x] = m;
			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him:
			 */
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);

			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n",
		    usbd_errstr(error));

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* count output errors */
		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
Example #6
File: nat_cmd.c  Project: JabirTech/Source
  LibAliasSetSkinnyPort(la, port);

  return 0;
}

static struct mbuf *
nat_LayerPush(struct bundle *bundle, struct link *l __unused, struct mbuf *bp,
                int pri __unused, u_short *proto)
{
  if (!bundle->NatEnabled || *proto != PROTO_IP)
    return bp;

  log_Printf(LogDEBUG, "nat_LayerPush: PROTO_IP -> PROTO_IP\n");
  m_settype(bp, MB_NATOUT);
  /* Ensure there's a bit of extra buffer for the NAT code... */
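  /*
   * Note: this is ppp's userland mbuf library, not the kernel mbuf
   * API.  m_append(bp, NULL, NAT_EXTRABUF) is assumed here to extend
   * the buffer by NAT_EXTRABUF bytes (a NULL source appends empty
   * space), and m_pullup() then makes the result contiguous before
   * LibAliasOut() rewrites the packet.
   */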
  bp = m_pullup(m_append(bp, NULL, NAT_EXTRABUF));
  LibAliasOut(la, MBUF_CTOP(bp), bp->m_len);
  bp->m_len = ntohs(((struct ip *)MBUF_CTOP(bp))->ip_len);

  return bp;
}

static struct mbuf *
nat_LayerPull(struct bundle *bundle, struct link *l __unused, struct mbuf *bp,
                u_short *proto)
{
  static int gfrags;
  int ret, len, nfrags;
  struct mbuf **last;
  char *fptr;
Example #7
void TupleDescriberFlexible::m_describe(const TuplesDefaultImpl &tuples, const TupleFormat &format)
{
  const uint8_t width =
      tuples.t_ins().size() + tuples.t_outs().size() < 10
    ? 1
    : (tuples.t_ins().size() + tuples.t_outs().size() < 100 ? 2 : 3)
  ;
  TuplesDefaultImpl::tuples_in_t::const_iterator iter_in = tuples.t_ins().begin();
  TuplesDefaultImpl::tuples_out_t::const_iterator iter_out = tuples.t_outs().begin();
  const TupleBase *ptuple;
  tupleDirection dir = dirIn;
  uint8_t
      cnt = 0
    , cnt_in = 0
    , cnt_out = 0
  ;

  ostringstream ostr;

  if (tuples.cv() == v_ctor && (tuples.t_ins().size() || tuples.t_outs().size()))
  {
    ostr
      << "Constructors:" << endl
    ;
  }

  while (true)
  {
    if (dir == dirIn && iter_in != tuples.t_ins().end())
    {
      ptuple = *iter_in ++;
    }
    else if (iter_out != tuples.t_outs().end())
    {
      if (dir == dirIn)
      {
        dir = dirOut;
        cnt = 0;
      }
      ptuple = *iter_out ++;
    }
    else
    {
      break;
    }

    describeOptions opt;
    if (ptuple->description.size() == ptuple->index().size())
    {
      opt = optPerLine;
    }
    else if (ptuple->description.size() == ptuple->index().size() + 1)
    {
      opt = optPerLineWithHeader;
    }
    else
    {
      opt = optRaw;
    }

    if (dir == dirOut && !cnt_out && cnt_in)
    {
      ostr << endl;
    }

    if (ptuple->errors().size())
    {
      m_description_errors(ostr, *ptuple, format, dir, cnt);
    }
    else
    {
      TupleBase::strings_t::const_iterator iter_descs = ptuple->description.begin();
      m_description_formatted_header(ostr, iter_descs, *ptuple, format, dir, opt, cnt);
      m_description_formatted_body(ostr, iter_descs, *ptuple, format, dir, opt);
    }

    cnt ++;
    if (dir == dirIn)
    {
      cnt_in ++;
    }
    else if (dir == dirOut)
    {
      cnt_out ++;
    }
  }
  m_append(ostr.str(), false);
}
Example #8
void TupleDescriberFlexible::append(const char *s)
{
  m_append(s ? s : "");
}
Example #9
void TupleDescriberFlexible::help(const char *help)
{
  m_append(help ? help : "");
}
Example #10
bool_t
xdr_rpc_gss_wrap_data(struct mbuf **argsp,
                      gss_ctx_id_t ctx, gss_qop_t qop,
                      rpc_gss_service_t svc, u_int seq)
{
    struct mbuf	*args, *mic;
    OM_uint32	maj_stat, min_stat;
    int		conf_state;
    u_int		len;
    static char	zpad[4];

    args = *argsp;

    /*
     * Prepend the sequence number before calling gss_get_mic or gss_wrap.
     */
    put_uint32(&args, seq);
    len = m_length(args, NULL);

    if (svc == rpc_gss_svc_integrity) {
        /* Checksum rpc_gss_data_t. */
        maj_stat = gss_get_mic_mbuf(&min_stat, ctx, qop, args, &mic);
        if (maj_stat != GSS_S_COMPLETE) {
            rpc_gss_log_debug("gss_get_mic failed");
            m_freem(args);
            return (FALSE);
        }

        /*
         * Marshal databody_integ. Note that since args is
         * already RPC encoded, there will be no padding.
         */
        put_uint32(&args, len);

        /*
         * Marshal checksum. This is likely to need padding.
         */
        len = m_length(mic, NULL);
        put_uint32(&mic, len);
        if (len != RNDUP(len)) {
            m_append(mic, RNDUP(len) - len, zpad);
        }

        /*
         * Concatenate databody_integ with checksum.
         */
        m_cat(args, mic);
    } else if (svc == rpc_gss_svc_privacy) {
        /* Encrypt rpc_gss_data_t. */
        maj_stat = gss_wrap_mbuf(&min_stat, ctx, TRUE, qop,
                                 &args, &conf_state);
        if (maj_stat != GSS_S_COMPLETE) {
            rpc_gss_log_status("gss_wrap", NULL,
                               maj_stat, min_stat);
            return (FALSE);
        }

        /*
         *  Marshal databody_priv and deal with RPC padding.
         */
        len = m_length(args, NULL);
        put_uint32(&args, len);
        if (len != RNDUP(len)) {
            m_append(args, RNDUP(len) - len, zpad);
        }
    }
    *argsp = args;
    return (TRUE);
}
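
The padding arithmetic above relies on zpad being a static, zero-initialized buffer and on RNDUP rounding up to XDR's 4-byte unit. A sketch of the assumed definition (the standard XDR round-up macro):

#define BYTES_PER_XDR_UNIT	4
#define RNDUP(x)	((((x) + BYTES_PER_XDR_UNIT - 1) / BYTES_PER_XDR_UNIT) \
			 * BYTES_PER_XDR_UNIT)

/* e.g. len == 5: RNDUP(5) == 8, so three zero bytes from zpad are appended */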
Example #11
/**
 * Packet transmit
 *
 * @param m    Packet to send
 * @param ifp    Interface to transmit on
 * @return Zero on success, one if the packet was dropped
 */
int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t    pko_command;
	cvmx_buf_ptr_t              hw_buffer;
	int                         dropped;
	int                         qos;
	cvm_oct_private_t          *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports multiple queues
	   per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an errata (GMX-401) which causes the
	   GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad packets to be
	   68 bytes whenever we are in half duplex mode. We don't handle
	   the case of having a small packet but no room to add the padding.
	   The kernel should always give us at least a cache line */
	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to padd small packet.", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA.  */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of segments
		 * in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().", __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA.  */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA.  */
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
	pko_command.s.reg0 = priv->fau+qos*4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Check if we can use the hardware checksumming */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calc */
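		/*
		 * ipoffp1 is the IP header offset plus one in the PKO
		 * command word (zero disables hardware checksumming),
		 * hence ETHER_HDR_LEN + 1 for an untagged Ethernet frame.
		 */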
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Get the number of mbufs in use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else
	if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners.  */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs not in use by the hardware */
	if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
			_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
			m_freem(m);
		}
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}