Example #1
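/*
 * npf_reassembly: pass an IP fragment to the IPv4 or IPv6 reassembly
 * mechanism and, once the final fragment has arrived, re-cache the
 * reassembled packet.
 */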
static int
npf_reassembly(npf_t *npf, npf_cache_t *npc, struct mbuf **mp)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	int error = EINVAL;

	/* Reset the mbuf as it may have changed. */
	*mp = nbuf_head_mbuf(nbuf);
	nbuf_reset(nbuf);

	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = nbuf_dataptr(nbuf);
		error = ip_reass_packet(mp, ip);
	} else if (npf_iscached(npc, NPC_IP6)) {
		/*
		 * Note: the offset passed to ip6_reass_packet() is the
		 * start of the fragment header.
		 */
		error = ip6_reass_packet(mp, npc->npc_hlen);
		if (error && *mp == NULL) {
			memset(nbuf, 0, sizeof(nbuf_t));
		}
	}
	if (error) {
		npf_stats_inc(npf, NPF_STAT_REASSFAIL);
		return error;
	}
	if (*mp == NULL) {
		/* More fragments should come. */
		npf_stats_inc(npf, NPF_STAT_FRAGMENTS);
		return 0;
	}

	/*
	 * Reassembly is complete; we have the final packet.
	 * Cache again, since the layer 4 data is now accessible.
	 */
	nbuf_init(npf, nbuf, *mp, nbuf->nb_ifp);
	npc->npc_info = 0;

	if (npf_cache_all(npc) & NPC_IPFRAG) {
		return EINVAL;
	}
	npf_stats_inc(npf, NPF_STAT_REASSEMBLY);
	return 0;
}
Example #2
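/*
 * parse_nbuf_chain: walk the given mbuf chain through the nbuf
 * interface and return its payload collected into a flat string.
 */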
static char *
parse_nbuf_chain(struct mbuf *m)
{
	ifnet_t *dummy_ifp = npf_test_addif(IFNAME_TEST, false, false);
	char *s = kmem_zalloc(MBUF_CHAIN_LEN + 1, KM_SLEEP);
	nbuf_t nbuf;
	void *nptr;
	int n;

	nbuf_init(npf_getkernctx(), &nbuf, m, dummy_ifp);

	nptr = nbuf_advance(&nbuf, (random() % 16) + 1, (random() % 16) + 1);
	mbuf_consistency_check(&nbuf);
	assert(nptr != NULL);
	nbuf_reset(&nbuf);

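	/*
	 * Fetch the chain one 32-bit word at a time, making each word
	 * contiguous before copying it out.
	 */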
	for (n = 0; ; ) {
		char d[4 + 1];

		nptr = nbuf_ensure_contig(&nbuf, sizeof(uint32_t));
		if (nptr == NULL) {
			break;
		}
		mbuf_consistency_check(&nbuf);
		memcpy(&d, nptr, sizeof(uint32_t));

		d[sizeof(d) - 1] = '\0';
		strcat(s, d);

		if (n + sizeof(uint32_t) == MBUF_CHAIN_LEN) {
			assert(nbuf_advance(&nbuf, sizeof(uint32_t) - 1, 0));
			assert(!nbuf_advance(&nbuf, 1, 0));
			break;
		}
		if (!nbuf_advance(&nbuf, sizeof(uint32_t), 0)) {
			break;
		}
		n += sizeof(uint32_t);
	}
	mbuf_consistency_check(&nbuf);
	return s;
}
Example #3
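/*
 * test_bpf_code: run the given BPF byte-code against a test TCP packet
 * using the interpreter and, if it compiles, the JIT-generated code,
 * asserting that both produce the same result.
 */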
static int
test_bpf_code(void *code, size_t size)
{
	ifnet_t *dummy_ifp = npf_test_addif(IFNAME_TEST, false, false);
	npf_cache_t npc = { .npc_info = 0 };
	bpf_args_t bc_args;
	struct mbuf *m;
	nbuf_t nbuf;
	int ret, jret;
	void *jcode;

	/* Layer 3 (IP + TCP). */
	m = fill_packet(IPPROTO_TCP);
	nbuf_init(&nbuf, m, dummy_ifp);
	npf_cache_all(&npc, &nbuf);

	memset(&bc_args, 0, sizeof(bpf_args_t));
	bc_args.pkt = m;
	bc_args.wirelen = m_length(m);
	bc_args.arg = &npc;

	ret = npf_bpf_filter(&bc_args, code, NULL);

	/* JIT-compiled code. */
	jcode = npf_bpf_compile(code, size);
	if (jcode) {
		jret = npf_bpf_filter(&bc_args, NULL, jcode);
		assert(ret == jret);
		bpf_jit_freecode(jcode);
	} else if (lverbose) {
		printf("JIT-compilation failed\n");
	}
	m_freem(m);

	return ret;
}

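/*
 * npf_bpfcop_run: invoke the NPF L3 coprocessor call and return the
 * value it stored in the given BPF memory word.
 */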
static uint32_t
npf_bpfcop_run(u_int reg)
{
	struct bpf_insn insns_npf_bpfcop[] = {
		BPF_STMT(BPF_MISC+BPF_COP, NPF_COP_L3),
		BPF_STMT(BPF_LD+BPF_W+BPF_MEM, reg),
		BPF_STMT(BPF_RET+BPF_A, 0),
	};
	return test_bpf_code(&insns_npf_bpfcop, sizeof(insns_npf_bpfcop));
}

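/*
 * npf_bpfcop_test: verify the values produced by the L3 coprocessor:
 * the IP version, the L4 header offset and the L4 protocol.
 */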
static bool
npf_bpfcop_test(void)
{
	bool fail = false;

	/* A <- IP version (4 or 6) */
	struct bpf_insn insns_ipver[] = {
		BPF_STMT(BPF_MISC+BPF_COP, NPF_COP_L3),
		BPF_STMT(BPF_RET+BPF_A, 0),
	};
	fail |= (test_bpf_code(&insns_ipver, sizeof(insns_ipver)) != IPVERSION);

	/* BPF_MW_IPVER <- IP version */
	fail |= (npf_bpfcop_run(BPF_MW_IPVER) != IPVERSION);

	/* BPF_MW_L4OFF <- L4 header offset */
	fail |= (npf_bpfcop_run(BPF_MW_L4OFF) != sizeof(struct ip));

	/* BPF_MW_L4PROTO <- L4 protocol */
	fail |= (npf_bpfcop_run(BPF_MW_L4PROTO) != IPPROTO_TCP);

	return fail;
}
Example #4
/*
 * npf_packet_handler: main packet handling routine for layer 3.
 *
 * Note: packet flow and inspection logic is in strict order.
 */
int
npf_packet_handler(void *arg, struct mbuf **mp, ifnet_t *ifp, int di)
{
	nbuf_t nbuf;
	npf_cache_t npc;
	npf_session_t *se;
	npf_rule_t *rl;
	npf_rproc_t *rp;
	int error, retfl;
	int decision;

	/*
	 * Initialise packet information cache.
	 * Note: it is enough to clear the info bits.
	 */
	KASSERT(ifp != NULL);
	nbuf_init(&nbuf, *mp, ifp);
	npc.npc_info = 0;
	decision = NPF_DECISION_BLOCK;
	error = 0;
	retfl = 0;
	rp = NULL;

	/* Cache everything.  Determine whether it is an IP fragment. */
	if (npf_cache_all(&npc, &nbuf) & NPC_IPFRAG) {
		/*
		 * Pass to IPv4 or IPv6 reassembly mechanism.
		 */
		error = npf_reassembly(&npc, &nbuf, mp);
		if (error) {
			se = NULL;
			goto out;
		}
		if (*mp == NULL) {
			/* More fragments should come; return. */
			return 0;
		}
	}

	/* Inspect the list of sessions (if found, acquires a reference). */
	se = npf_session_inspect(&npc, &nbuf, di, &error);

	/* If "passing" session found - skip the ruleset inspection. */
	if (se && npf_session_pass(se, &rp)) {
		npf_stats_inc(NPF_STAT_PASS_SESSION);
		KASSERT(error == 0);
		goto pass;
	}
	if (error) {
		if (error == ENETUNREACH)
			goto block;
		goto out;
	}

	/* Acquire the lock, inspect the ruleset using this packet. */
	int slock = npf_config_read_enter();
	npf_ruleset_t *rlset = npf_config_ruleset();

	rl = npf_ruleset_inspect(&npc, &nbuf, rlset, di, NPF_LAYER_3);
	if (rl == NULL) {
		const bool pass = npf_default_pass();
		npf_config_read_exit(slock);

		if (pass) {
			npf_stats_inc(NPF_STAT_PASS_DEFAULT);
			goto pass;
		}
		npf_stats_inc(NPF_STAT_BLOCK_DEFAULT);
		goto block;
	}

	/*
	 * Get the rule procedure (acquires a reference) for association
	 * with a session (if any) and execution.
	 */
	KASSERT(rp == NULL);
	rp = npf_rule_getrproc(rl);

	/* Conclude with the rule and release the lock. */
	error = npf_rule_conclude(rl, &retfl);
	npf_config_read_exit(slock);

	if (error) {
		npf_stats_inc(NPF_STAT_BLOCK_RULESET);
		goto block;
	}
	npf_stats_inc(NPF_STAT_PASS_RULESET);

	/*
	 * Establish a "pass" session, if required.  Just proceed
	 * if session creation fails (e.g. due to an unsupported protocol).
	 */
	if ((retfl & NPF_RULE_STATEFUL) != 0 && !se) {
		se = npf_session_establish(&npc, &nbuf, di,
		    (retfl & NPF_RULE_MULTIENDS) == 0);
		if (se) {
			/*
			 * Note: the reference on the rule procedure is
			 * transferred to the session.  It will be released
			 * on session destruction.
			 */
			npf_session_setpass(se, rp);
		}
	}
pass:
	decision = NPF_DECISION_PASS;
	KASSERT(error == 0);
	/*
	 * Perform NAT.
	 */
	error = npf_do_nat(&npc, se, &nbuf, di);
block:
	/*
	 * Execute the rule procedure, if any is associated.
	 * It may reverse the decision from pass to block.
	 */
	if (rp && !npf_rproc_run(&npc, &nbuf, rp, &decision)) {
		if (se) {
			npf_session_release(se);
		}
		npf_rproc_release(rp);
		*mp = NULL;
		return 0;
	}
out:
	/*
	 * Release the reference on a session.  Release the reference on a
	 * rule procedure only if there was no association.
	 */
	if (se) {
		npf_session_release(se);
	} else if (rp) {
		npf_rproc_release(rp);
	}

	/* Reset mbuf pointer before returning to the caller. */
	if ((*mp = nbuf_head_mbuf(&nbuf)) == NULL) {
		return error ? error : ENOMEM;
	}

	/* Pass the packet if decided and there is no error. */
	if (decision == NPF_DECISION_PASS && !error) {
		/*
		 * XXX: Disable for now, it will be set accordingly later,
		 * for optimisations (to reduce inspection).
		 */
		(*mp)->m_flags &= ~M_CANFASTFWD;
		return 0;
	}

	/*
	 * Block the packet.  ENETUNREACH is used to indicate blocking.
	 * Depending on the flags and protocol, return TCP reset (RST) or
	 * ICMP destination unreachable.
	 */
	if (retfl && npf_return_block(&npc, &nbuf, retfl)) {
		*mp = NULL;
	}

	if (!error) {
		error = ENETUNREACH;
	}

	if (*mp) {
		m_freem(*mp);
		*mp = NULL;
	}
	return error;
}
Example #5
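/*
 * mcf523xfec_init: lwIP network interface initialisation for the
 * MCF523x FEC.  Allocates the driver state, creates the TX/RX
 * semaphores, starts the receive task and fills in the netif fields.
 */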
err_t
mcf523xfec_init( struct netif *netif )
{
    err_t           res;

    mcf523xfec_if_t *fecif = mem_malloc( sizeof( mcf523xfec_if_t ) );

    if( fecif != NULL )
    {
        /* Global copy used in ISR. */
        fecif_g = fecif;
        fecif->self = ( struct eth_addr * )&netif->hwaddr[0];
        fecif->netif = netif;
        fecif->tx_sem = NULL;
        fecif->rx_sem = NULL;

        if( ( fecif->tx_sem = sys_sem_new( 1 ) ) == NULL )
        {
            res = ERR_MEM;
        }
        else if( ( fecif->rx_sem = sys_sem_new( 0 ) ) == NULL )
        {
            res = ERR_MEM;
        }
        else if( sys_thread_new( mcf523xfec_rx_task, fecif, TASK_PRIORITY ) == NULL )
        {
            res = ERR_MEM;
        }
        else
        {
            netif->state = fecif;
            netif->name[0] = 'C';
            netif->name[1] = 'F';
            netif->hwaddr_len = ETH_ADDR_LEN;
            netif->mtu = MCF_FEC_MTU;
            netif->flags = NETIF_FLAG_BROADCAST;
            netif->output = mcf523xfec_output;
            netif->linkoutput = mcf523xfec_output_raw;

            nbuf_init(  );
            mcf523xfec_get_mac( fecif, fecif->self );
            mcf523xfec_reset( fecif );
            mcf523xfec_enable( fecif );

            etharp_init(  );
            sys_timeout( ARP_TMR_INTERVAL, arp_timer, NULL );

            res = ERR_OK;
        }

        if( res != ERR_OK )
        {
            /* Release the semaphores before freeing the state that holds them. */
            if( fecif->tx_sem != NULL )
            {
                sys_sem_free( fecif->tx_sem );
            }
            if( fecif->rx_sem != NULL )
            {
                sys_sem_free( fecif->rx_sem );
            }
            mem_free( fecif );
        }
    }
    else
    {
        res = ERR_MEM;
    }

    return res;
}
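
For context, a minimal bring-up sketch showing how an initialiser like this is typically registered with the stack. It assumes the lwIP 1.x-era netif_add()/tcpip_input() API matching the style of the driver above; network_init(), fec_netif and the addresses are hypothetical placeholders, not part of the driver.

#include "lwip/ip_addr.h"
#include "lwip/netif.h"
#include "lwip/tcpip.h"

/* Provided by the driver above (normally via its header). */
err_t mcf523xfec_init( struct netif *netif );

static struct netif fec_netif;

void
network_init( void )
{
    struct ip_addr ipaddr, netmask, gw;

    /* Placeholder addresses for the sketch. */
    IP4_ADDR( &ipaddr,  192, 168,   0,  10 );
    IP4_ADDR( &netmask, 255, 255, 255,   0 );
    IP4_ADDR( &gw,      192, 168,   0,   1 );

    /*
     * netif_add() invokes mcf523xfec_init() to bring up the hardware;
     * tcpip_input() hands received frames to the lwIP core thread.
     */
    netif_add( &fec_netif, &ipaddr, &netmask, &gw, NULL,
               mcf523xfec_init, tcpip_input );
    netif_set_default( &fec_netif );
    netif_set_up( &fec_netif );
}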