Example #1
File: in_pcblist.c  Project: jndok/xnu
__private_extern__ void
sotoxsocket_n(struct socket *so, struct xsocket_n *xso)
{
	xso->xso_len = sizeof (struct xsocket_n);
	xso->xso_kind = XSO_SOCKET;

	if (so != NULL) {
		xso->xso_so = (uint64_t)VM_KERNEL_ADDRPERM(so);
		xso->so_type = so->so_type;
		xso->so_options = so->so_options;
		xso->so_linger = so->so_linger;
		xso->so_state = so->so_state;
		xso->so_pcb = (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb);
		if (so->so_proto) {
			xso->xso_protocol = SOCK_PROTO(so);
			xso->xso_family = SOCK_DOM(so);
		} else {
			xso->xso_protocol = xso->xso_family = 0;
		}
		xso->so_qlen = so->so_qlen;
		xso->so_incqlen = so->so_incqlen;
		xso->so_qlimit = so->so_qlimit;
		xso->so_timeo = so->so_timeo;
		xso->so_error = so->so_error;
		xso->so_pgid = so->so_pgid;
		xso->so_oobmark = so->so_oobmark;
		xso->so_uid = kauth_cred_getuid(so->so_cred);
		xso->so_last_pid = so->last_pid;
		xso->so_e_pid = so->e_pid;
	}
}
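
The pattern above recurs throughout these examples: a kernel structure is mirrored into an export ("x") structure, and every raw kernel pointer is passed through VM_KERNEL_ADDRPERM() so userspace receives an obfuscated token rather than a real kernel address. A minimal sketch of that pattern follows; the struct and field names are hypothetical, not from XNU, and the code assumes an XNU kernel build where VM_KERNEL_ADDRPERM() is in scope.

/*
 * Hypothetical kernel object and its exported mirror.  Pointer fields are
 * never copied out raw: they go through VM_KERNEL_ADDRPERM(), which maps
 * NULL to 0 and otherwise yields an obfuscated value that is stable for
 * the current boot.
 */
struct my_kobj {
	struct my_kobj	*mk_next;
	uint32_t	mk_flags;
};

struct my_xkobj {
	uint64_t	xmk_obj;	/* permuted address of the object */
	uint64_t	xmk_next;	/* permuted address of mk_next    */
	uint32_t	xmk_flags;	/* plain data, copied unchanged   */
};

static void
my_kobj_to_xkobj(struct my_kobj *ko, struct my_xkobj *xko)
{
	xko->xmk_obj = (uint64_t)VM_KERNEL_ADDRPERM(ko);
	xko->xmk_next = (uint64_t)VM_KERNEL_ADDRPERM(ko->mk_next);
	xko->xmk_flags = ko->mk_flags;
}
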
Example #2
File: in_pcblist.c  Project: jndok/xnu
static void
inpcb_to_xinpcb_n(struct inpcb *inp, struct xinpcb_n *xinp)
{
	xinp->xi_len = sizeof (struct xinpcb_n);
	xinp->xi_kind = XSO_INPCB;
	xinp->xi_inpp = (uint64_t)VM_KERNEL_ADDRPERM(inp);
	xinp->inp_fport = inp->inp_fport;
	xinp->inp_lport = inp->inp_lport;
	xinp->inp_ppcb = (uint64_t)VM_KERNEL_ADDRPERM(inp->inp_ppcb);
	xinp->inp_gencnt = inp->inp_gencnt;
	xinp->inp_flags = inp->inp_flags;
	xinp->inp_flow = inp->inp_flow;
	xinp->inp_vflag = inp->inp_vflag;
	xinp->inp_ip_ttl = inp->inp_ip_ttl;
	xinp->inp_ip_p = inp->inp_ip_p;
	xinp->inp_dependfaddr.inp6_foreign = inp->inp_dependfaddr.inp6_foreign;
	xinp->inp_dependladdr.inp6_local = inp->inp_dependladdr.inp6_local;
	xinp->inp_depend4.inp4_ip_tos = inp->inp_depend4.inp4_ip_tos;
	xinp->inp_depend6.inp6_hlim = 0;
	xinp->inp_depend6.inp6_cksum = inp->inp_depend6.inp6_cksum;
	xinp->inp_depend6.inp6_ifindex = 0;
	xinp->inp_depend6.inp6_hops = inp->inp_depend6.inp6_hops;
	xinp->inp_flowhash = inp->inp_flowhash;
	xinp->inp_flags2 = inp->inp_flags2;
}
Example #3
static void
fill_common_sockinfo(struct socket *so, struct socket_info *si)
{
	si->soi_so = (u_int64_t)VM_KERNEL_ADDRPERM(so);
	si->soi_type = so->so_type;
	si->soi_options = (short)(so->so_options & 0xffff);
	si->soi_linger = so->so_linger;
	si->soi_state = so->so_state;
	si->soi_pcb = (u_int64_t)VM_KERNEL_ADDRPERM(so->so_pcb);
	if (so->so_proto) {
		si->soi_protocol = SOCK_PROTO(so);
		if (so->so_proto->pr_domain)
			si->soi_family = SOCK_DOM(so);
		else
			si->soi_family = 0;
	} else {
		si->soi_protocol = si->soi_family = 0;
	}
	si->soi_qlen = so->so_qlen;
	si->soi_incqlen = so->so_incqlen;
	si->soi_qlimit = so->so_qlimit;
	si->soi_timeo = so->so_timeo;
	si->soi_error = so->so_error;
	si->soi_oobmark = so->so_oobmark;
	fill_sockbuf_info(&so->so_snd, &si->soi_snd);
	fill_sockbuf_info(&so->so_rcv, &si->soi_rcv);
}
Example #4
File: in_pcblist.c  Project: jndok/xnu
__private_extern__ void
tcpcb_to_xtcpcb_n(struct tcpcb *tp, struct xtcpcb_n *xt)
{
	xt->xt_len = sizeof (struct xtcpcb_n);
	xt->xt_kind = XSO_TCPCB;

	xt->t_segq = (uint32_t)VM_KERNEL_ADDRPERM(tp->t_segq.lh_first);
	xt->t_dupacks = tp->t_dupacks;
	xt->t_timer[TCPT_REXMT_EXT] = tp->t_timer[TCPT_REXMT];
	xt->t_timer[TCPT_PERSIST_EXT] = tp->t_timer[TCPT_PERSIST];
	xt->t_timer[TCPT_KEEP_EXT] = tp->t_timer[TCPT_KEEP];
	xt->t_timer[TCPT_2MSL_EXT] = tp->t_timer[TCPT_2MSL];
	xt->t_state = tp->t_state;
	xt->t_flags = tp->t_flags;
	xt->t_force = (tp->t_flagsext & TF_FORCE) ? 1 : 0;
	xt->snd_una = tp->snd_una;
	xt->snd_max = tp->snd_max;
	xt->snd_nxt = tp->snd_nxt;
	xt->snd_up = tp->snd_up;
	xt->snd_wl1 = tp->snd_wl1;
	xt->snd_wl2 = tp->snd_wl2;
	xt->iss = tp->iss;
	xt->irs = tp->irs;
	xt->rcv_nxt = tp->rcv_nxt;
	xt->rcv_adv = tp->rcv_adv;
	xt->rcv_wnd = tp->rcv_wnd;
	xt->rcv_up = tp->rcv_up;
	xt->snd_wnd = tp->snd_wnd;
	xt->snd_cwnd = tp->snd_cwnd;
	xt->snd_ssthresh = tp->snd_ssthresh;
	xt->t_maxopd = tp->t_maxopd;
	xt->t_rcvtime = tp->t_rcvtime;
	xt->t_starttime = tp->t_starttime;
	xt->t_rtttime = tp->t_rtttime;
	xt->t_rtseq = tp->t_rtseq;
	xt->t_rxtcur = tp->t_rxtcur;
	xt->t_maxseg = tp->t_maxseg;
	xt->t_srtt = tp->t_srtt;
	xt->t_rttvar = tp->t_rttvar;
	xt->t_rxtshift = tp->t_rxtshift;
	xt->t_rttmin = tp->t_rttmin;
	xt->t_rttupdated = tp->t_rttupdated;
	xt->max_sndwnd = tp->max_sndwnd;
	xt->t_softerror = tp->t_softerror;
	xt->t_oobflags = tp->t_oobflags;
	xt->t_iobc = tp->t_iobc;
	xt->snd_scale = tp->snd_scale;
	xt->rcv_scale = tp->rcv_scale;
	xt->request_r_scale = tp->request_r_scale;
	xt->requested_s_scale = tp->requested_s_scale;
	xt->ts_recent = tp->ts_recent;
	xt->ts_recent_age = tp->ts_recent_age;
	xt->last_ack_sent = tp->last_ack_sent;
	xt->cc_send = 0;
	xt->cc_recv = 0;
	xt->snd_recover = tp->snd_recover;
	xt->snd_cwnd_prev = tp->snd_cwnd_prev;
	xt->snd_ssthresh_prev = tp->snd_ssthresh_prev;
}
Example #5
File: in_pcblist.c  Project: jndok/xnu
__private_extern__ uint32_t
inpcb_count_opportunistic(unsigned int ifindex, struct inpcbinfo *pcbinfo,
    u_int32_t flags)
{
	uint32_t opportunistic = 0;
	struct inpcb *inp;
	inp_gen_t gencnt;

	lck_rw_lock_shared(pcbinfo->ipi_lock);
	gencnt = pcbinfo->ipi_gencnt;
	for (inp = LIST_FIRST(pcbinfo->ipi_listhead);
	    inp != NULL; inp = LIST_NEXT(inp, inp_list)) {
		if (inp->inp_gencnt <= gencnt &&
		    inp->inp_state != INPCB_STATE_DEAD &&
		    inp->inp_socket != NULL &&
		    so_get_opportunistic(inp->inp_socket) &&
		    inp->inp_last_outifp != NULL &&
		    ifindex == inp->inp_last_outifp->if_index) {
			opportunistic++;
			struct socket *so = inp->inp_socket;
			if ((flags & INPCB_OPPORTUNISTIC_SETCMD) &&
			    (so->so_state & SS_ISCONNECTED)) {
				socket_lock(so, 1);
				if (flags & INPCB_OPPORTUNISTIC_THROTTLEON) {
					so->so_flags |= SOF_SUSPENDED;
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_SUSPEND));
				} else {
					so->so_flags &= ~(SOF_SUSPENDED);
					soevent(so,
					    (SO_FILT_HINT_LOCKED |
					    SO_FILT_HINT_RESUME));
				}
				SOTHROTTLELOG(("throttle[%d]: so 0x%llx "
				    "[%d,%d] %s\n", so->last_pid,
				    (uint64_t)VM_KERNEL_ADDRPERM(so),
				    SOCK_DOM(so), SOCK_TYPE(so),
				    (so->so_flags & SOF_SUSPENDED) ?
				    "SUSPENDED" : "RESUMED"));
				socket_unlock(so, 1);
			}
		}
	}

	lck_rw_done(pcbinfo->ipi_lock);

	return (opportunistic);
}
Example #6
File: mach_debug.c  Project: Bitesher/xnu
kern_return_t
mach_port_kobject(
	ipc_space_t			space,
	mach_port_name_t		name,
	natural_t			*typep,
	mach_vm_address_t		*addrp)
{
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;
	mach_vm_address_t kaddr;

	if (space == IS_NULL)
		return KERN_INVALID_TASK;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr != KERN_SUCCESS)
		return kr;
	/* space is read-locked and active */

	if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) {
		is_read_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = (ipc_port_t) entry->ie_object;
	assert(port != IP_NULL);

	ip_lock(port);
	is_read_unlock(space);

	if (!ip_active(port)) {
		ip_unlock(port);
		return KERN_INVALID_RIGHT;
	}

	*typep = (unsigned int) ip_kotype(port);
	kaddr = (mach_vm_address_t)port->ip_kobject;
	ip_unlock(port);

	if (0 != kaddr && is_ipc_kobject(*typep))
		*addrp = VM_KERNEL_ADDRPERM(VM_KERNEL_UNSLIDE(kaddr));
	else
		*addrp = 0;

	return KERN_SUCCESS;
}
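
This example layers two macros: the kobject address is first stripped of the KASLR slide with VM_KERNEL_UNSLIDE() (when it points into slid kernel text) and the result is then permuted with VM_KERNEL_ADDRPERM(), so the value returned to the caller exposes neither the slid runtime address nor the unslid static one. Below is a small hypothetical helper capturing the same idiom; the name and placement are illustrative, not from XNU.

/*
 * Hypothetical helper: unslide, then permute.  A zero input stays zero,
 * so callers can still test for "no kobject".  Assumes the XNU kernel
 * headers that define VM_KERNEL_UNSLIDE() and VM_KERNEL_ADDRPERM().
 */
static mach_vm_address_t
export_kobject_address(mach_vm_address_t kaddr)
{
	if (kaddr == 0)
		return 0;
	return VM_KERNEL_ADDRPERM(VM_KERNEL_UNSLIDE(kaddr));
}
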
Example #7
int
pipe_stat(struct pipe *cpipe, void *ub, int isstat64)
{
#if CONFIG_MACF
        int error;
#endif
	int	pipe_size = 0;
	int	pipe_count;
	struct stat *sb = (struct stat *)0;	/* warning avoidance ; protected by isstat64 */
	struct stat64 * sb64 = (struct stat64 *)0;  /* warning avoidance ; protected by isstat64 */

	if (cpipe == NULL)
	        return (EBADF);
	PIPE_LOCK(cpipe);

#if CONFIG_MACF
	error = mac_pipe_check_stat(kauth_cred_get(), cpipe);
	if (error) {
		PIPE_UNLOCK(cpipe);
	        return (error);
	}
#endif
	if (cpipe->pipe_buffer.buffer == 0) {
	        /* must be stat'ing the write fd */
	        if (cpipe->pipe_peer) {
		        /* the peer still exists, use its info */
		        pipe_size  = MAX_PIPESIZE(cpipe->pipe_peer);
			pipe_count = cpipe->pipe_peer->pipe_buffer.cnt;
		} else {
			pipe_count = 0;
		}
	} else {
	        pipe_size  = MAX_PIPESIZE(cpipe);
		pipe_count = cpipe->pipe_buffer.cnt;
	}
	/*
	 * since the peer's buffer is set up outside of the lock,
	 * we might catch it in a transient state
	 */
	if (pipe_size == 0)
		pipe_size  = MAX(PIPE_SIZE, pipesize_blocks[0]);

	if (isstat64 != 0) {
		sb64 = (struct stat64 *)ub;	

		bzero(sb64, sizeof(*sb64));
		sb64->st_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
		sb64->st_blksize = pipe_size;
		sb64->st_size = pipe_count;
		sb64->st_blocks = (sb64->st_size + sb64->st_blksize - 1) / sb64->st_blksize;
	
		sb64->st_uid = kauth_getuid();
		sb64->st_gid = kauth_getgid();
	
		sb64->st_atimespec.tv_sec  = cpipe->st_atimespec.tv_sec;
		sb64->st_atimespec.tv_nsec = cpipe->st_atimespec.tv_nsec;
	
		sb64->st_mtimespec.tv_sec  = cpipe->st_mtimespec.tv_sec;
		sb64->st_mtimespec.tv_nsec = cpipe->st_mtimespec.tv_nsec;

		sb64->st_ctimespec.tv_sec  = cpipe->st_ctimespec.tv_sec;
		sb64->st_ctimespec.tv_nsec = cpipe->st_ctimespec.tv_nsec;

		/*
	 	* Return a relatively unique inode number based on the current
	 	* address of this pipe's struct pipe.  This number may be recycled
	 	* relatively quickly.
	 	*/
		sb64->st_ino = (ino64_t)VM_KERNEL_ADDRPERM((uintptr_t)cpipe);
	} else {
		sb = (struct stat *)ub;	

		bzero(sb, sizeof(*sb));
		sb->st_mode = S_IFIFO | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
		sb->st_blksize = pipe_size;
		sb->st_size = pipe_count;
		sb->st_blocks = (sb->st_size + sb->st_blksize - 1) / sb->st_blksize;
	
		sb->st_uid = kauth_getuid();
		sb->st_gid = kauth_getgid();
	
		sb->st_atimespec.tv_sec  = cpipe->st_atimespec.tv_sec;
		sb->st_atimespec.tv_nsec = cpipe->st_atimespec.tv_nsec;
	
		sb->st_mtimespec.tv_sec  = cpipe->st_mtimespec.tv_sec;
		sb->st_mtimespec.tv_nsec = cpipe->st_mtimespec.tv_nsec;

		sb->st_ctimespec.tv_sec  = cpipe->st_ctimespec.tv_sec;
		sb->st_ctimespec.tv_nsec = cpipe->st_ctimespec.tv_nsec;

		/*
	 	* Return a relatively unique inode number based on the current
	 	* address of this pipe's struct pipe.  This number may be recycled
	 	* relatively quickly.
	 	*/
		sb->st_ino = (ino_t)VM_KERNEL_ADDRPERM((uintptr_t)cpipe);
	}
	PIPE_UNLOCK(cpipe);

	/*
	 * POSIX: Left as 0: st_dev, st_nlink, st_rdev, st_flags, st_gen,
	 * st_uid, st_gid.
	 *
	 * XXX (st_dev) should be unique, but there is no device driver that
	 * XXX is associated with pipes, since they are implemented via a
	 * XXX struct fileops indirection rather than as FS objects.
	 */
	return (0);
}
Example #8
kern_return_t
mach_port_space_info(
	ipc_space_t			space,
	ipc_info_space_t		*infop,
	ipc_info_name_array_t		*tablep,
	mach_msg_type_number_t 		*tableCntp,
	__unused ipc_info_tree_name_array_t	*treep,
	__unused mach_msg_type_number_t         *treeCntp)
{
	ipc_info_name_t *table_info;
	vm_offset_t table_addr;
	vm_size_t table_size, table_size_needed;
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	mach_port_index_t index;
	kern_return_t kr;
	vm_map_copy_t copy;


	if (space == IS_NULL)
		return KERN_INVALID_TASK;

#if !(DEVELOPMENT | DEBUG)
	const boolean_t dbg_ok = (mac_task_check_expose_task(kernel_task) == 0);
#else
	const boolean_t dbg_ok = TRUE;
#endif

	/* start with in-line memory */

	table_size = 0;

	for (;;) {
		is_read_lock(space);
		if (!is_active(space)) {
			is_read_unlock(space);
			if (table_size != 0)
				kmem_free(ipc_kernel_map,
					  table_addr, table_size);
			return KERN_INVALID_TASK;
		}

		table_size_needed =
			vm_map_round_page((space->is_table_size
					   * sizeof(ipc_info_name_t)),
					  VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (table_size_needed == table_size)
			break;

		is_read_unlock(space);

		if (table_size != table_size_needed) {
			if (table_size != 0)
				kmem_free(ipc_kernel_map, table_addr, table_size);
			kr = kmem_alloc(ipc_kernel_map,	&table_addr, table_size_needed, VM_KERN_MEMORY_IPC);
			if (kr != KERN_SUCCESS) {
				return KERN_RESOURCE_SHORTAGE;
			}
			table_size = table_size_needed;
		}

	}
	/* space is read-locked and active; we have enough wired memory */

	/* get the overall space info */
	infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
	infop->iis_table_size = space->is_table_size;
	infop->iis_table_next = space->is_table_next->its_size;

	/* walk the table for this space */
	table = space->is_table;
	tsize = space->is_table_size;
	table_info = (ipc_info_name_array_t)table_addr;
	for (index = 0; index < tsize; index++) {
		ipc_info_name_t *iin = &table_info[index];
		ipc_entry_t entry = &table[index];
		ipc_entry_bits_t bits;

		bits = entry->ie_bits;
		iin->iin_name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
		iin->iin_collision = 0;
		iin->iin_type = IE_BITS_TYPE(bits);
		if ((entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) != MACH_PORT_TYPE_NONE &&
		    entry->ie_request != IE_REQ_NONE) {
			__IGNORE_WCASTALIGN(ipc_port_t port = (ipc_port_t) entry->ie_object);

			assert(IP_VALID(port));
			ip_lock(port);
			iin->iin_type |= ipc_port_request_type(port, iin->iin_name, entry->ie_request);
			ip_unlock(port);
		}

		iin->iin_urefs = IE_BITS_UREFS(bits);
		iin->iin_object = (dbg_ok) ? (natural_t)VM_KERNEL_ADDRPERM((uintptr_t)entry->ie_object) : 0;
		iin->iin_next = entry->ie_next;
		iin->iin_hash = entry->ie_index;
	}

	is_read_unlock(space);

	/* prepare the table out-of-line data for return */
	if (table_size > 0) {
		vm_size_t used_table_size;

		used_table_size = infop->iis_table_size * sizeof(ipc_info_name_t);
		if (table_size > used_table_size)
			bzero((char *)&table_info[infop->iis_table_size],
			      table_size - used_table_size);

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(table_addr,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(table_addr + table_size,
					  VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr, 
				   (vm_map_size_t)used_table_size, TRUE, &copy);
		assert(kr == KERN_SUCCESS);
		*tablep = (ipc_info_name_t *)copy;
		*tableCntp = infop->iis_table_size;
	} else {
		*tablep = (ipc_info_name_t *)0;
		*tableCntp = 0;
	}

	/* splay tree is obsolete, no work to do... */
	*treep = (ipc_info_tree_name_t *)0;
	*treeCntp = 0;
	return KERN_SUCCESS;
}
Example #9
errno_t
fill_socketinfo(struct socket *so, struct socket_info *si)
{
	errno_t error = 0;
	int domain;
	short type;
	short protocol;

	socket_lock(so, 0);

	si->soi_kind = SOCKINFO_GENERIC;

	fill_common_sockinfo(so, si);

	if (so->so_pcb == NULL || so->so_proto == 0 ||
	    so->so_proto->pr_domain == NULL)
		goto out;

	/*
	 * The kind of socket is determined by the triplet
	 * {domain, type, protocol}
	 */
	domain = SOCK_DOM(so);
	type = SOCK_TYPE(so);
	protocol = SOCK_PROTO(so);
	switch (domain) {
	case PF_INET:
	case PF_INET6: {
		struct in_sockinfo *insi = &si->soi_proto.pri_in;
		struct inpcb *inp = (struct inpcb *)so->so_pcb;

		si->soi_kind = SOCKINFO_IN;

		insi->insi_fport = inp->inp_fport;
		insi->insi_lport = inp->inp_lport;
		insi->insi_gencnt = inp->inp_gencnt;
		insi->insi_flags = inp->inp_flags;
		insi->insi_vflag = inp->inp_vflag;
		insi->insi_ip_ttl = inp->inp_ip_ttl;
		insi->insi_faddr.ina_6 = inp->inp_dependfaddr.inp6_foreign;
		insi->insi_laddr.ina_6 = inp->inp_dependladdr.inp6_local;
		insi->insi_v4.in4_tos = inp->inp_depend4.inp4_ip_tos;
		insi->insi_v6.in6_hlim = 0;
		insi->insi_v6.in6_cksum = inp->inp_depend6.inp6_cksum;
		insi->insi_v6.in6_ifindex = 0;
		insi->insi_v6.in6_hops = inp->inp_depend6.inp6_hops;

		if (type == SOCK_STREAM && (protocol == 0 ||
		    protocol == IPPROTO_TCP) && inp->inp_ppcb != NULL) {
			struct tcp_sockinfo *tcpsi = &si->soi_proto.pri_tcp;
			struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

			si->soi_kind = SOCKINFO_TCP;

			tcpsi->tcpsi_state = tp->t_state;
			tcpsi->tcpsi_timer[TCPT_REXMT] =
			    tp->t_timer[TCPT_REXMT];
			tcpsi->tcpsi_timer[TCPT_PERSIST] =
			    tp->t_timer[TCPT_PERSIST];
			tcpsi->tcpsi_timer[TCPT_KEEP] = tp->t_timer[TCPT_KEEP];
			tcpsi->tcpsi_timer[TCPT_2MSL] = tp->t_timer[TCPT_2MSL];
			tcpsi->tcpsi_mss = tp->t_maxseg;
			tcpsi->tcpsi_flags = tp->t_flags;
			tcpsi->tcpsi_tp =
			    (u_int64_t)VM_KERNEL_ADDRPERM(tp);
		}
		break;
	}
	case PF_UNIX: {
		struct unpcb *unp = (struct unpcb *)so->so_pcb;
		struct un_sockinfo *unsi = &si->soi_proto.pri_un;

		si->soi_kind = SOCKINFO_UN;

		unsi->unsi_conn_pcb =
		    (uint64_t)VM_KERNEL_ADDRPERM(unp->unp_conn);
		if (unp->unp_conn)
			unsi->unsi_conn_so = (uint64_t)
			    VM_KERNEL_ADDRPERM(unp->unp_conn->unp_socket);

		if (unp->unp_addr) {
			size_t	addrlen = unp->unp_addr->sun_len;

			if (addrlen > SOCK_MAXADDRLEN)
				addrlen = SOCK_MAXADDRLEN;
			bcopy(unp->unp_addr, &unsi->unsi_addr, addrlen);
		}
		if (unp->unp_conn && unp->unp_conn->unp_addr) {
			size_t	addrlen = unp->unp_conn->unp_addr->sun_len;

			if (addrlen > SOCK_MAXADDRLEN)
				addrlen = SOCK_MAXADDRLEN;
			bcopy(unp->unp_conn->unp_addr, &unsi->unsi_caddr,
			    addrlen);
		}
		break;
	}
	case PF_NDRV: {
		struct ndrv_cb *ndrv_cb = (struct ndrv_cb *)so->so_pcb;
		struct ndrv_info *ndrvsi = &si->soi_proto.pri_ndrv;

		si->soi_kind = SOCKINFO_NDRV;

		/* TBD: lock ifnet? */
		if (ndrv_cb->nd_if != 0) {
			struct ifnet *ifp = ndrv_cb->nd_if;

			ndrvsi->ndrvsi_if_family = ifp->if_family;
			ndrvsi->ndrvsi_if_unit = ifp->if_unit;
			strlcpy(ndrvsi->ndrvsi_if_name, ifp->if_name, IFNAMSIZ);
		}
		break;
	}
	case PF_SYSTEM:
		if (SOCK_PROTO(so) == SYSPROTO_EVENT) {
			struct kern_event_pcb *ev_pcb =
			    (struct kern_event_pcb *)so->so_pcb;
			struct kern_event_info *kesi =
			    &si->soi_proto.pri_kern_event;

			si->soi_kind = SOCKINFO_KERN_EVENT;

			kesi->kesi_vendor_code_filter =
			    ev_pcb->evp_vendor_code_filter;
			kesi->kesi_class_filter = ev_pcb->evp_class_filter;
			kesi->kesi_subclass_filter = ev_pcb->evp_subclass_filter;

		} else if (SOCK_PROTO(so) == SYSPROTO_CONTROL) {
			struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
			struct kern_ctl_info *kcsi =
			    &si->soi_proto.pri_kern_ctl;
			struct kctl *kctl = kcb->kctl;

			si->soi_kind = SOCKINFO_KERN_CTL;

			if (kctl == 0)
				break;
			kcsi->kcsi_id = kctl->id;
			kcsi->kcsi_reg_unit = kctl->id;
			kcsi->kcsi_flags = kctl->flags;
			kcsi->kcsi_recvbufsize = kctl->recvbufsize;
			kcsi->kcsi_sendbufsize = kctl->sendbufsize;
			kcsi->kcsi_unit = kcb->unit;
			strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
		}
		break;

	case PF_ROUTE:
	case PF_PPP:
	default:
		break;
	}
out:
	socket_unlock(so, 0);

	return (error);
}
Example #10
void
ah4_input(struct mbuf *m, int off)
{
	struct ip *ip;
	struct ah *ah;
	u_int32_t spi;
	const struct ah_algorithm *algo;
	size_t siz;
	size_t siz1;
	u_char *cksum;
	struct secasvar *sav = NULL;
	u_int16_t nxt;
	size_t hlen;
	size_t stripsiz = 0;
	sa_family_t ifamily;

	if (m->m_len < off + sizeof(struct newah)) {
		m = m_pullup(m, off + sizeof(struct newah));
		if (!m) {
			ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup;"
				"dropping the packet for simplicity\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto fail;
		}
	}

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	ah = (struct ah *)(void *)(((caddr_t)ip) + off);
	nxt = ah->ah_nxt;
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = ah->ah_spi;

	if ((sav = key_allocsa(AF_INET,
	                      (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	                      IPPROTO_AH, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 AH input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto fail;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP ah4_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 AH input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto fail;
	}

	algo = ah_algorithm_lookup(sav->alg_auth);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 AH input: "
		    "unsupported authentication algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto fail;
	}

	siz = (*algo->sumsiz)(sav);
	siz1 = ((siz + 3) & ~(4 - 1));

	/*
	 * sanity checks for header, 1.
	 */
    {
	int sizoff;

	sizoff = (sav->flags & SADB_X_EXT_OLD) ? 0 : 4;

	/*
	 * Here, we do not check "siz1 == siz".  This is because of the way
	 * RFC 2403/2404 section 2 is written: they do not require truncation
	 * to 96 bits.
	 * For example, Microsoft IPsec stack attaches 160 bits of
	 * authentication data for both hmac-md5 and hmac-sha1.  For hmac-sha1,
	 * 32 bits of padding is attached.
	 *
	 * There are two downsides to this specification.
	 * They do no real harm, but they leave us with a fuzzy feeling.
	 * - if we attach more than 96 bits of authentication data onto AH,
	 *   we will never notice about possible modification by rogue
	 *   intermediate nodes.
	 *   Since the extra bits in the AH checksum are never used, this
	 *   constitutes no real issue, but it is wacky.
	 * - even if the peer attaches big authentication data, we will never
	 *   notice the difference, since longer authentication data will just
	 *   work.
	 *
	 * We may need some clarification in the spec.
	 */
	if (siz1 < siz) {
		ipseclog((LOG_NOTICE, "sum length too short in IPv4 AH input "
		    "(%lu, should be at least %lu): %s\n",
		    (u_int32_t)siz1, (u_int32_t)siz,
		    ipsec4_logpacketstr(ip, spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto fail;
	}
	if ((ah->ah_len << 2) - sizoff != siz1) {
		ipseclog((LOG_NOTICE, "sum length mismatch in IPv4 AH input "
		    "(%d should be %lu): %s\n",
		    (ah->ah_len << 2) - sizoff, (u_int32_t)siz1,
		    ipsec4_logpacketstr(ip, spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto fail;
	}

	if (m->m_len < off + sizeof(struct ah) + sizoff + siz1) {
		m = m_pullup(m, off + sizeof(struct ah) + sizoff + siz1);
		if (!m) {
			ipseclog((LOG_DEBUG, "IPv4 AH input: can't pullup\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto fail;
		}
		/* Expect 32-bit aligned data ptr on strict-align platforms */
		MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

		ip = mtod(m, struct ip *);
		ah = (struct ah *)(void *)(((caddr_t)ip) + off);
	}
    }