Example #1
File: in6_src.c Project: Prajna/xnu
/*
 * Default hop limit selection. The precedence is as follows:
 * 1. Hoplimit value specified via ioctl.
 * 2. (If the outgoing interface is detected) the current
 *     hop limit of the interface specified by router advertisement.
 * 3. The system default hoplimit.
*/
int
in6_selecthlim(
	struct in6pcb *in6p,
	struct ifnet *ifp)
{
	if (in6p && in6p->in6p_hops >= 0) {
		return(in6p->in6p_hops);
	} else {
		lck_rw_lock_shared(nd_if_rwlock);
		if (ifp && ifp->if_index < nd_ifinfo_indexlim) {
			u_int8_t chlim = nd_ifinfo[ifp->if_index].chlim;
			lck_rw_done(nd_if_rwlock);
			return (chlim);
		} else {
			lck_rw_done(nd_if_rwlock);
			return(ip6_defhlim);
		}
	}
}
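The value returned by in6_selecthlim() is typically written straight into the hop limit field of an outgoing IPv6 header. A minimal, hedged usage sketch (the surrounding ip6, in6p and ro variables are assumptions for illustration, not part of the excerpt):

	/* Fill the outgoing header's hop limit from the PCB/interface;
	 * in6_selecthlim() itself falls back to the system default. */
	ip6->ip6_hlim = in6_selecthlim(in6p,
	    (ro->ro_rt != NULL) ? ro->ro_rt->rt_ifp : NULL);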
Example #2
File: if_pflog.c Project: argp/xnu
static int
pflog_remove(struct ifnet *ifp)
{
	int error = 0;
	struct pflog_softc *pflogif = NULL;

	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);
	pflogif = ifp->if_softc;

	if (pflogif == NULL ||
	    (pflogif->sc_flags & IFPFLF_DETACHING) != 0) {
		error = EINVAL;
		goto done;
	}

	pflogif->sc_flags |= IFPFLF_DETACHING;
	LIST_REMOVE(pflogif, sc_list);
done:
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);
	return error;
}
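Both pflog_remove() above and pflog_clone_create() in Example #8 take the pf perimeter lock shared before the pf mutex and release the two in reverse order, with error paths funnelled through the done: label. A condensed sketch of that nesting, using the lock names from the excerpt:

	lck_rw_lock_shared(pf_perim_lock);	/* outer: perimeter lock, read side */
	lck_mtx_lock(pf_lock);			/* inner: pf state mutex */
	/* ... inspect or update pf state; on error, goto done ... */
done:
	lck_mtx_unlock(pf_lock);		/* release in reverse order */
	lck_rw_done(pf_perim_lock);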
Example #3
void
lock_read(
	register lock_t	* l)
{
	lck_rw_lock_shared(l);
}
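lock_read() is a thin compatibility wrapper that simply forwards to lck_rw_lock_shared(). For context, here is a minimal sketch of the full lifecycle of such a read/write lock using the xnu lock KPIs; the group name and the functions prefixed with example_ are illustrative assumptions:

#include <kern/locks.h>

static lck_grp_t *example_lck_grp;
static lck_rw_t  *example_rw_lock;

/* One-time setup, e.g. at kext start. */
static void
example_locks_init(void)
{
	example_lck_grp = lck_grp_alloc_init("com.example.locks", LCK_GRP_ATTR_NULL);
	example_rw_lock = lck_rw_alloc_init(example_lck_grp, LCK_ATTR_NULL);
}

/* Reader path: any number of readers may hold the lock concurrently. */
static void
example_read_shared_state(void)
{
	lck_rw_lock_shared(example_rw_lock);
	/* ... read the shared state ... */
	lck_rw_unlock_shared(example_rw_lock);	/* or lck_rw_done(), as in the examples */
}

/* Teardown, e.g. at kext stop. */
static void
example_locks_fini(void)
{
	lck_rw_free(example_rw_lock, example_lck_grp);
	lck_grp_free(example_lck_grp);
}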
Example #4
static int
vboxvfs_vnode_readdir(struct vnop_readdir_args *args)
{
    vboxvfs_mount_t *pMount;
    vboxvfs_vnode_t *pVnodeData;
    SHFLDIRINFO     *Info;
    uint32_t         cbInfo;
    mount_t          mp;
    vnode_t          vnode;
    struct uio      *uio;

    int rc = 0, rc2;

    PDEBUG("Reading directory...");

    AssertReturn(args,              EINVAL);
    AssertReturn(args->a_eofflag,   EINVAL);
    AssertReturn(args->a_numdirent, EINVAL);

    uio             = args->a_uio;                             AssertReturn(uio,        EINVAL);
    vnode           = args->a_vp;                              AssertReturn(vnode,      EINVAL); AssertReturn(vnode_isdir(vnode), EINVAL);
    pVnodeData      = (vboxvfs_vnode_t *)vnode_fsnode(vnode);  AssertReturn(pVnodeData, EINVAL);
    mp              = vnode_mount(vnode);                      AssertReturn(mp,         EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp);             AssertReturn(pMount,     EINVAL);

    lck_rw_lock_shared(pVnodeData->pLock);

    cbInfo = sizeof(Info) + MAXPATHLEN;
    Info   = (SHFLDIRINFO *)RTMemAllocZ(cbInfo);
    if (!Info)
    {
        PDEBUG("No memory to allocate internal data");
        lck_rw_unlock_shared(pVnodeData->pLock);
        return ENOMEM;
    }

    uint32_t index = (uint32_t)uio_offset(uio) / (uint32_t)sizeof(struct dirent);
    uint32_t cFiles = 0;

    PDEBUG("Exploring VBoxVFS directory (%s), handle (0x%.8X), offset (0x%X), count (%d)", (char *)pVnodeData->pPath->String.utf8, (int)pVnodeData->pHandle, index, uio_iovcnt(uio));

    /* Currently, there is a problem where VbglR0SfDirInfo() is not able to
     * continue retrieving directory contents if the same VBoxVFS handle is used.
     * This macro forces a new handle to be used in the readdir() callback. If
     * enabled, the original handle (obtained in the open() callback) is ignored. */

    SHFLHANDLE Handle;
    rc = vboxvfs_open_internal(pMount,
                               pVnodeData->pPath,
                               SHFL_CF_DIRECTORY | SHFL_CF_ACCESS_READ | SHFL_CF_ACT_OPEN_IF_EXISTS | SHFL_CF_ACT_FAIL_IF_NEW,
                               &Handle);
    if (rc != 0)
    {
        PDEBUG("Unable to open dir: %d", rc);
        RTMemFree(Info);
        lck_rw_unlock_shared(pVnodeData->pLock);
        return rc;
    }

#if 0
    rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0, 0, index, &cbInfo, (PSHFLDIRINFO)Info, &cFiles);
#else
    SHFLSTRING *pMask = vboxvfs_construct_shflstring("*", strlen("*"));
    if (pMask)
    {
        for (uint32_t cSkip = 0; (cSkip < index + 1) && (rc == VINF_SUCCESS); cSkip++)
        {
            //rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0 /* pMask */, 0 /* SHFL_LIST_RETURN_ONE */, 0, &cbInfo, (PSHFLDIRINFO)Info, &cFiles);

            uint32_t cbReturned = cbInfo;
            //rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, pMask, SHFL_LIST_RETURN_ONE, 0, &cbReturned, (PSHFLDIRINFO)Info, &cFiles);
            rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0, SHFL_LIST_RETURN_ONE, 0,
                                 &cbReturned, (PSHFLDIRINFO)Info, &cFiles);

        }

        PDEBUG("read %d files", cFiles);
        RTMemFree(pMask);
    }
    else
    {
        PDEBUG("Can't alloc mask");
        rc = ENOMEM;
    }
#endif
    rc2 = vboxvfs_close_internal(pMount, Handle);
    if (rc2 != 0)
    {
        PDEBUG("Unable to close directory: %s: %d",
               pVnodeData->pPath->String.utf8,
               rc2);
    }

    switch (rc)
    {
        case VINF_SUCCESS:
        {
            rc = vboxvfs_vnode_readdir_copy_data((ino_t)(index + 1), Info, uio, args->a_numdirent);
            break;
        }

        case VERR_NO_MORE_FILES:
        {
            PDEBUG("No more entries in directory");
            *(args->a_eofflag) = 1;
            break;
        }

        default:
        {
            PDEBUG("VbglR0SfDirInfo() for item #%d has failed: %d", (int)index, (int)rc);
            rc = EINVAL;
            break;
        }
    }

    RTMemFree(Info);
    lck_rw_unlock_shared(pVnodeData->pLock);

    return rc;
}
Example #5
static int
vboxvfs_vnode_getattr(struct vnop_getattr_args *args)
{
    vboxvfs_mount_t   *pMount;
    struct vnode_attr *vnode_args;
    vboxvfs_vnode_t   *pVnodeData;

    struct timespec timespec;

    SHFLFSOBJINFO Info;
    mount_t       mp;
    vnode_t       vnode;
    int           rc;

    PDEBUG("Getting vnode attribute...");

    AssertReturn(args, EINVAL);

    vnode                = args->a_vp;                                   AssertReturn(vnode,           EINVAL);
    vnode_args           = args->a_vap;                                  AssertReturn(vnode_args,      EINVAL);
    mp                   = vnode_mount(vnode);                           AssertReturn(mp,              EINVAL);
    pMount      = (vboxvfs_mount_t *)vfs_fsprivate(mp);     AssertReturn(pMount, EINVAL);
    pVnodeData           = (vboxvfs_vnode_t *)vnode_fsnode(vnode);  AssertReturn(pVnodeData,      EINVAL);

    lck_rw_lock_shared(pVnodeData->pLock);

    rc = vboxvfs_get_info_internal(mp, pVnodeData->pPath, &Info);
    if (rc == 0)
    {
        /* Set timestamps */
        RTTimeSpecGetTimespec(&Info.BirthTime,        &timespec); VATTR_RETURN(vnode_args, va_create_time, timespec);
        RTTimeSpecGetTimespec(&Info.AccessTime,       &timespec); VATTR_RETURN(vnode_args, va_access_time, timespec);
        RTTimeSpecGetTimespec(&Info.ModificationTime, &timespec); VATTR_RETURN(vnode_args, va_modify_time, timespec);
        RTTimeSpecGetTimespec(&Info.ChangeTime,       &timespec); VATTR_RETURN(vnode_args, va_change_time, timespec);
        VATTR_CLEAR_ACTIVE(vnode_args, va_backup_time);

        /* Set owner info. */
        VATTR_RETURN(vnode_args, va_uid, pMount->owner);
        VATTR_CLEAR_ACTIVE(vnode_args, va_gid);

        /* Access mode and flags */
        VATTR_RETURN(vnode_args, va_mode,  vboxvfs_h2g_mode_inernal(Info.Attr.fMode));
        VATTR_RETURN(vnode_args, va_flags, Info.Attr.u.Unix.fFlags);

        /* The current generation number (0 if this information is not available) */
        VATTR_RETURN(vnode_args, va_gen, Info.Attr.u.Unix.GenerationId);

        VATTR_RETURN(vnode_args, va_rdev,  0);
        VATTR_RETURN(vnode_args, va_nlink, 2);

        VATTR_RETURN(vnode_args, va_data_size, sizeof(struct dirent)); /* Size of data returned per each readdir() request */

        /* Hopefully, nothing catastrophic will happen when this overflows! If we do not assign
         * a unique va_fileid to each vnode, `ls`, `find` (and similar tools that use fts_read()) will think that
         * each sub-directory is self-cycled. */
        VATTR_RETURN(vnode_args, va_fileid, (pMount->cFileIdCounter++));

        /* Not supported */
        VATTR_CLEAR_ACTIVE(vnode_args, va_linkid);
        VATTR_CLEAR_ACTIVE(vnode_args, va_parentid);
        VATTR_CLEAR_ACTIVE(vnode_args, va_fsid);
        VATTR_CLEAR_ACTIVE(vnode_args, va_filerev);

        /* Not present on 10.6 */
        //VATTR_CLEAR_ACTIVE(vnode_args, va_addedtime);

        /* todo: take care about va_encoding (file name encoding) */
        VATTR_CLEAR_ACTIVE(vnode_args, va_encoding);
        /* todo: take care about: va_acl */
        VATTR_CLEAR_ACTIVE(vnode_args, va_acl);

        VATTR_CLEAR_ACTIVE(vnode_args, va_name);
        VATTR_CLEAR_ACTIVE(vnode_args, va_uuuid);
        VATTR_CLEAR_ACTIVE(vnode_args, va_guuid);

        VATTR_CLEAR_ACTIVE(vnode_args, va_total_size);
        VATTR_CLEAR_ACTIVE(vnode_args, va_total_alloc);
        VATTR_CLEAR_ACTIVE(vnode_args, va_data_alloc);
        VATTR_CLEAR_ACTIVE(vnode_args, va_iosize);

        VATTR_CLEAR_ACTIVE(vnode_args, va_nchildren);
        VATTR_CLEAR_ACTIVE(vnode_args, va_dirlinkcount);
    }
    else
    {
        PDEBUG("getattr: unable to get VBoxVFS object info");
    }

    lck_rw_unlock_shared(pVnodeData->pLock);

    return rc;
}
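The getattr handler above relies on the VFS attribute macros: VATTR_RETURN() stores a value and marks the attribute as supported, while VATTR_CLEAR_ACTIVE() tells the VFS that the attribute was not filled in. For attributes that are expensive to compute, a handler can first check VATTR_IS_ACTIVE() so it only does the work the caller actually requested. A small hedged sketch (compute_data_size() is a hypothetical helper, not part of the excerpt):

    /* Only compute the size if the caller asked for it. */
    if (VATTR_IS_ACTIVE(vnode_args, va_data_size))
    {
        uint64_t cbData = compute_data_size(pVnodeData); /* hypothetical helper */
        VATTR_RETURN(vnode_args, va_data_size, cbData);
    }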
Example #6
File: in6_src.c Project: Prajna/xnu
struct in6_addr *
in6_selectsrc(struct sockaddr_in6 *dstsock, struct ip6_pktopts *opts,
    struct inpcb *inp, struct route_in6 *ro,
    struct ifnet **ifpp, struct in6_addr *src_storage, unsigned int ifscope,
    int *errorp)
{
	struct in6_addr dst;
	struct ifnet *ifp = NULL;
	struct in6_ifaddr *ia = NULL, *ia_best = NULL;
	struct in6_pktinfo *pi = NULL;
	int dst_scope = -1, best_scope = -1, best_matchlen = -1;
	struct in6_addrpolicy *dst_policy = NULL, *best_policy = NULL;
	u_int32_t odstzone;
	int prefer_tempaddr;
	struct ip6_moptions *mopts;
	struct timeval timenow;
	unsigned int nocell;
	boolean_t islocal = FALSE;

	getmicrotime(&timenow);

	dst = dstsock->sin6_addr; /* make a copy for local operation */
	*errorp = 0;
	if (ifpp != NULL)
		*ifpp = NULL;

	if (inp != NULL) {
		mopts = inp->in6p_moptions;
		nocell = (inp->inp_flags & INP_NO_IFT_CELLULAR) ? 1 : 0;
	} else {
		mopts = NULL;
		nocell = 0;
	}

	/*
	 * If the source address is explicitly specified by the caller,
	 * check if the requested source address is indeed a unicast address
	 * assigned to the node, and can be used as the packet's source
	 * address.  If everything is okay, use the address as source.
	 */
	if (opts && (pi = opts->ip6po_pktinfo) &&
	    !IN6_IS_ADDR_UNSPECIFIED(&pi->ipi6_addr)) {
		struct sockaddr_in6 srcsock;
		struct in6_ifaddr *ia6;

		/* get the outgoing interface */
		if ((*errorp = in6_selectif(dstsock, opts, mopts, ro, ifscope,
		    nocell, &ifp)) != 0) {
			return (NULL);
		}

		/*
		 * determine the appropriate zone id of the source based on
		 * the zone of the destination and the outgoing interface.
		 * If the specified address is ambiguous wrt the scope zone,
		 * the interface must be specified; otherwise, ifa_ifwithaddr()
		 * will fail matching the address.
		 */
		bzero(&srcsock, sizeof(srcsock));
		srcsock.sin6_family = AF_INET6;
		srcsock.sin6_len = sizeof(srcsock);
		srcsock.sin6_addr = pi->ipi6_addr;
		if (ifp) {
			*errorp = in6_setscope(&srcsock.sin6_addr, ifp, NULL);
			if (*errorp != 0) {
				ifnet_release(ifp);
				return (NULL);
			}
		}
		ia6 = (struct in6_ifaddr *)ifa_ifwithaddr((struct sockaddr *)(&srcsock));
		if (ia6 == NULL) {
			*errorp = EADDRNOTAVAIL;
			if (ifp != NULL)
				ifnet_release(ifp);
			return (NULL);
		}
		IFA_LOCK_SPIN(&ia6->ia_ifa);
		if ((ia6->ia6_flags & (IN6_IFF_ANYCAST | IN6_IFF_NOTREADY)) ||
		    (nocell && (ia6->ia_ifa.ifa_ifp->if_type == IFT_CELLULAR))) {
			IFA_UNLOCK(&ia6->ia_ifa);
			IFA_REMREF(&ia6->ia_ifa);
			*errorp = EADDRNOTAVAIL;
			if (ifp != NULL)
				ifnet_release(ifp);
			return (NULL);
		}

		*src_storage = satosin6(&ia6->ia_addr)->sin6_addr;
		IFA_UNLOCK(&ia6->ia_ifa);
		IFA_REMREF(&ia6->ia_ifa);
		if (ifpp != NULL) {
			/* if ifp is non-NULL, refcnt held in in6_selectif() */
			*ifpp = ifp;
		} else if (ifp != NULL) {
			ifnet_release(ifp);
		}
		return (src_storage);
	}

	/*
	 * Otherwise, if the socket has already bound the source, just use it.
	 */
	if (inp != NULL && !IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) 
		return (&inp->in6p_laddr);

	/*
	 * If the address is not specified, choose the best one based on
	 * the outgoing interface and the destination address.
	 */

	/* get the outgoing interface */
	if ((*errorp = in6_selectif(dstsock, opts, mopts, ro, ifscope, nocell,
	    &ifp)) != 0)
		return (NULL);

#ifdef DIAGNOSTIC
	if (ifp == NULL)	/* this should not happen */
		panic("in6_selectsrc: NULL ifp");
#endif
	*errorp = in6_setscope(&dst, ifp, &odstzone);
	if (*errorp != 0) {
		if (ifp != NULL)
			ifnet_release(ifp);
		return (NULL);
	}
	lck_rw_lock_shared(&in6_ifaddr_rwlock);

	for (ia = in6_ifaddrs; ia; ia = ia->ia_next) {
		int new_scope = -1, new_matchlen = -1;
		struct in6_addrpolicy *new_policy = NULL;
		u_int32_t srczone, osrczone, dstzone;
		struct in6_addr src;
		struct ifnet *ifp1 = ia->ia_ifp;

		IFA_LOCK(&ia->ia_ifa);
		/*
		 * We'll never take an address that breaks the scope zone
		 * of the destination.  We also skip an address if its zone
		 * does not contain the outgoing interface.
		 * XXX: we should probably use sin6_scope_id here.
		 */
		if (in6_setscope(&dst, ifp1, &dstzone) ||
		    odstzone != dstzone)
			goto next;

		src = ia->ia_addr.sin6_addr;
		if (in6_setscope(&src, ifp, &osrczone) ||
		    in6_setscope(&src, ifp1, &srczone) ||
		    osrczone != srczone)
			goto next;

		/* avoid unusable addresses */
		if ((ia->ia6_flags &
		     (IN6_IFF_NOTREADY | IN6_IFF_ANYCAST | IN6_IFF_DETACHED)))
			goto next;

		if (!ip6_use_deprecated && IFA6_IS_DEPRECATED(ia))
			goto next;

		/* Rule 1: Prefer same address */
		if (IN6_ARE_ADDR_EQUAL(&dst, &ia->ia_addr.sin6_addr))
			BREAK(1); /* there should be no better candidate */

		if (ia_best == NULL)
			REPLACE(0);

		/* Rule 2: Prefer appropriate scope */
		if (dst_scope < 0)
			dst_scope = in6_addrscope(&dst);
		new_scope = in6_addrscope(&ia->ia_addr.sin6_addr);
		if (IN6_ARE_SCOPE_CMP(best_scope, new_scope) < 0) {
			if (IN6_ARE_SCOPE_CMP(best_scope, dst_scope) < 0)
				REPLACE(2);
			NEXTSRC(2);
		} else if (IN6_ARE_SCOPE_CMP(new_scope, best_scope) < 0) {
			if (IN6_ARE_SCOPE_CMP(new_scope, dst_scope) < 0)
				NEXTSRC(2);
			REPLACE(2);
		}

		/*
		 * Rule 3: Avoid deprecated addresses.  Note that the case of
		 * !ip6_use_deprecated is already rejected above.
		 */
		if (!IFA6_IS_DEPRECATED(ia_best) && IFA6_IS_DEPRECATED(ia))
			NEXTSRC(3);
		if (IFA6_IS_DEPRECATED(ia_best) && !IFA6_IS_DEPRECATED(ia))
			REPLACE(3);

		/* Rule 4: Prefer home addresses */
		/*
		 * XXX: This is a TODO.  We should probably merge the MIP6
		 * case above.
		 */

		/* Rule 5: Prefer outgoing interface */
		if (ia_best->ia_ifp == ifp && ia->ia_ifp != ifp)
			NEXTSRC(5);
		if (ia_best->ia_ifp != ifp && ia->ia_ifp == ifp)
			REPLACE(5);

		/*
		 * Rule 6: Prefer matching label
		 * Note that best_policy should be non-NULL here.
		 */
		if (dst_policy == NULL)
			dst_policy = in6_addrsel_lookup_policy(dstsock);
		if (dst_policy->label != ADDR_LABEL_NOTAPP) {
			new_policy = in6_addrsel_lookup_policy(&ia->ia_addr);
			if (dst_policy->label == best_policy->label &&
			    dst_policy->label != new_policy->label)
				NEXTSRC(6);
			if (dst_policy->label != best_policy->label &&
			    dst_policy->label == new_policy->label)
				REPLACE(6);
		}

		/*
		 * Rule 7: Prefer public addresses.
		 * We allow users to reverse the logic by configuring
		 * a sysctl variable, so that privacy conscious users can
		 * always prefer temporary addresses.
		 * Don't use temporary addresses for local destinations or
		 * for multicast addresses unless we were passed in an option.
		 */
		if (IN6_IS_ADDR_MULTICAST(&dst) ||
		    in6_matchlen(&ia_best->ia_addr.sin6_addr, &dst) >=
		    in6_mask2len(&ia_best->ia_prefixmask.sin6_addr, NULL))
			islocal = TRUE;
		if (opts == NULL ||
		    opts->ip6po_prefer_tempaddr == IP6PO_TEMPADDR_SYSTEM) {
			prefer_tempaddr = islocal ? 0 : ip6_prefer_tempaddr;
		} else if (opts->ip6po_prefer_tempaddr ==
		    IP6PO_TEMPADDR_NOTPREFER) {
			prefer_tempaddr = 0;
		} else
			prefer_tempaddr = 1;
		if (!(ia_best->ia6_flags & IN6_IFF_TEMPORARY) &&
		    (ia->ia6_flags & IN6_IFF_TEMPORARY)) {
			if (prefer_tempaddr)
				REPLACE(7);
			else
				NEXTSRC(7);
		}
		if ((ia_best->ia6_flags & IN6_IFF_TEMPORARY) &&
		    !(ia->ia6_flags & IN6_IFF_TEMPORARY)) {
			if (prefer_tempaddr)
				NEXTSRC(7);
			else
				REPLACE(7);
		}

		/*
		 * Rule 8: prefer addresses on alive interfaces.
		 * This is a KAME specific rule.
		 */
		if ((ia_best->ia_ifp->if_flags & IFF_UP) &&
		    !(ia->ia_ifp->if_flags & IFF_UP))
			NEXTSRC(8);
		if (!(ia_best->ia_ifp->if_flags & IFF_UP) &&
		    (ia->ia_ifp->if_flags & IFF_UP))
			REPLACE(8);

		/*
		 * Rule 14: Use longest matching prefix.
		 * Note: in the address selection draft, this rule is
		 * documented as "Rule 8".  However, since it is also
		 * documented that this rule can be overridden, we assign
		 * a large number so that it is easy to assign smaller numbers
		 * to more preferred rules.
		 */
		new_matchlen = in6_matchlen(&ia->ia_addr.sin6_addr, &dst);
		if (best_matchlen < new_matchlen)
			REPLACE(14);
		if (new_matchlen < best_matchlen)
			NEXTSRC(14);

		/* Rule 15 is reserved. */

		/*
		 * Last resort: just keep the current candidate.
		 * Or, do we need more rules?
		 */
		IFA_UNLOCK(&ia->ia_ifa);
		continue;

replace:
		best_scope = (new_scope >= 0 ? new_scope :
			      in6_addrscope(&ia->ia_addr.sin6_addr));
		best_policy = (new_policy ? new_policy :
			       in6_addrsel_lookup_policy(&ia->ia_addr));
		best_matchlen = (new_matchlen >= 0 ? new_matchlen :
				 in6_matchlen(&ia->ia_addr.sin6_addr, &dst));
		IFA_ADDREF_LOCKED(&ia->ia_ifa);	/* for ia_best */
		IFA_UNLOCK(&ia->ia_ifa);
		if (ia_best != NULL)
			IFA_REMREF(&ia_best->ia_ifa);
		ia_best = ia;
		continue;

next:
		IFA_UNLOCK(&ia->ia_ifa);
		continue;

out:
		IFA_ADDREF_LOCKED(&ia->ia_ifa);	/* for ia_best */
		IFA_UNLOCK(&ia->ia_ifa);
		if (ia_best != NULL)
			IFA_REMREF(&ia_best->ia_ifa);
		ia_best = ia;
		break;
	}

	lck_rw_done(&in6_ifaddr_rwlock);

	if (nocell && ia_best != NULL &&
	    (ia_best->ia_ifa.ifa_ifp->if_type == IFT_CELLULAR)) {
		IFA_REMREF(&ia_best->ia_ifa);
		ia_best = NULL;
	}

	if ( (ia = ia_best) == NULL) {
		*errorp = EADDRNOTAVAIL;
		if (ifp != NULL)
			ifnet_release(ifp);
		return (NULL);
	}

	IFA_LOCK_SPIN(&ia->ia_ifa);
	*src_storage = satosin6(&ia->ia_addr)->sin6_addr;
	IFA_UNLOCK(&ia->ia_ifa);
	IFA_REMREF(&ia->ia_ifa);
	if (ifpp != NULL) {
		/* if ifp is non-NULL, refcnt held in in6_selectif() */
		*ifpp = ifp;
	} else if (ifp != NULL) {
		ifnet_release(ifp);
	}
	return (src_storage);
}
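The REPLACE(), NEXTSRC() and BREAK() macros used inside the candidate loop are defined earlier in in6_src.c and are not part of this excerpt; roughly, each one records which selection rule made the decision and then jumps to the replace:, next: or out: label. A hedged sketch of their shape (the per-rule statistics counter is an assumption):

#define REPLACE(r) do { \
	ip6stat.ip6s_sources_rule[(r)]++;	/* assumed per-rule counter */ \
	goto replace;	/* adopt ia as the new best candidate */ \
} while (0)
#define NEXTSRC(r) do { \
	ip6stat.ip6s_sources_rule[(r)]++; \
	goto next;	/* keep ia_best and move to the next address */ \
} while (0)
#define BREAK(r) do { \
	ip6stat.ip6s_sources_rule[(r)]++; \
	goto out;	/* ia is the final choice, stop searching */ \
} while (0)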
Example #7
__private_extern__ void
inpcb_get_ports_used(uint32_t ifindex, int protocol, uint32_t flags,
    bitstr_t *bitfield, struct inpcbinfo *pcbinfo)
{
	struct inpcb *inp;
	struct socket *so;
	inp_gen_t gencnt;
	bool iswildcard, wildcardok, nowakeok;
	bool recvanyifonly, extbgidleok;
	bool activeonly;

	wildcardok = ((flags & INPCB_GET_PORTS_USED_WILDCARDOK) != 0);
	nowakeok = ((flags & INPCB_GET_PORTS_USED_NOWAKEUPOK) != 0);
	recvanyifonly = ((flags & INPCB_GET_PORTS_USED_RECVANYIFONLY) != 0);
	extbgidleok = ((flags & INPCB_GET_PORTS_USED_EXTBGIDLEONLY) != 0);
	activeonly = ((flags & INPCB_GET_PORTS_USED_ACTIVEONLY) != 0);

	lck_rw_lock_shared(pcbinfo->ipi_lock);
	gencnt = pcbinfo->ipi_gencnt;

	for (inp = LIST_FIRST(pcbinfo->ipi_listhead); inp;
	    inp = LIST_NEXT(inp, inp_list)) {
		uint16_t port;

		if (inp->inp_gencnt > gencnt ||
		    inp->inp_state == INPCB_STATE_DEAD ||
		    inp->inp_wantcnt == WNT_STOPUSING)
			continue;

		if ((so = inp->inp_socket) == NULL ||
		    (so->so_state & SS_DEFUNCT) ||
		    (so->so_state & SS_ISDISCONNECTED))
			continue;

		if (!(protocol == PF_UNSPEC ||
		    (protocol == PF_INET && (inp->inp_vflag & INP_IPV4)) ||
		    (protocol == PF_INET6 && (inp->inp_vflag & INP_IPV6))))
			continue;

		iswildcard = (((inp->inp_vflag & INP_IPV4) &&
		    inp->inp_laddr.s_addr == INADDR_ANY) ||
		    ((inp->inp_vflag & INP_IPV6) &&
		    IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)));

		if (!wildcardok && iswildcard)
			continue;

		if ((so->so_options & SO_NOWAKEFROMSLEEP) &&
			!nowakeok)
			continue;

		if (!(inp->inp_flags & INP_RECV_ANYIF) &&
			recvanyifonly)
			continue;

		if (!(so->so_flags1 & SOF1_EXTEND_BK_IDLE_WANTED) &&
			extbgidleok)
			continue;

		if (!iswildcard &&
		    !(ifindex == 0 || inp->inp_last_outifp == NULL ||
		    ifindex == inp->inp_last_outifp->if_index))
			continue;

		if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP &&
		    so->so_state & SS_CANTRCVMORE)
			continue;

		if (SOCK_PROTO(inp->inp_socket) == IPPROTO_TCP) {
			struct  tcpcb *tp = sototcpcb(inp->inp_socket);

			/*
			 * Workaround race where inp_ppcb is NULL during
			 * socket initialization
			 */
			if (tp == NULL)
				continue;

			switch (tp->t_state) {
				case TCPS_CLOSED:
					continue;
					/* NOT REACHED */
				case TCPS_LISTEN:
				case TCPS_SYN_SENT:
				case TCPS_SYN_RECEIVED:
				case TCPS_ESTABLISHED:
				case TCPS_FIN_WAIT_1:
					/*
					 * Note: FIN_WAIT_1 is an active state
					 * because we need our FIN to be
					 * acknowledged
					 */
					break;
				case TCPS_CLOSE_WAIT:
				case TCPS_CLOSING:
				case TCPS_LAST_ACK:
				case TCPS_FIN_WAIT_2:
					/*
					 * In the closing states, the connection
					 * is not idle when there is outgoing
					 * data having to be acknowledged
					 */
					if (activeonly && so->so_snd.sb_cc == 0)
						continue;
					break;
				case TCPS_TIME_WAIT:
					continue;
					/* NOT REACHED */
			}
		}
		/*
		 * Final safeguard to exclude unspecified local port
		 */
		port = ntohs(inp->inp_lport);
		if (port == 0)
			continue;
		bit_set(bitfield, port);
	}
	lck_rw_done(pcbinfo->ipi_lock);
}
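The bitfield argument is a bitstring.h bit array indexed by local port number; inpcb_get_ports_used() only ever sets bits, so the caller allocates a zeroed array covering all 65536 ports, runs the scan against a protocol's pcbinfo, and then tests individual bits. A hedged caller sketch (the use of tcbinfo and the MALLOC-based allocation are assumptions for illustration):

	bitstr_t *ports;

	/* One bit per possible local port (0-65535), zero-initialized. */
	MALLOC(ports, bitstr_t *, bitstr_size(65536), M_TEMP, M_WAITOK | M_ZERO);
	if (ports != NULL) {
		inpcb_get_ports_used(0 /* any interface */, PF_UNSPEC,
		    INPCB_GET_PORTS_USED_WILDCARDOK, ports, &tcbinfo);
		if (bit_test(ports, 80))
			printf("local TCP port 80 is in use\n");
		FREE(ports, M_TEMP);
	}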
Example #8
File: if_pflog.c Project: argp/xnu
static int
pflog_clone_create(struct if_clone *ifc, u_int32_t unit, __unused void *params)
{
	struct pflog_softc *pflogif;
	struct ifnet_init_eparams pf_init;
	int error = 0;

	if (unit >= PFLOGIFS_MAX) {
		/* Either the interface cloner or our initializer is broken */
		panic("%s: unit (%d) exceeds max (%d)", __func__, unit,
		    PFLOGIFS_MAX);
		/* NOTREACHED */
	}

	if ((pflogif = if_clone_softc_allocate(&pflog_cloner)) == NULL) {
		error = ENOMEM;
		goto done;
	}

	bzero(&pf_init, sizeof (pf_init));
	pf_init.ver = IFNET_INIT_CURRENT_VERSION;
	pf_init.len = sizeof (pf_init);
	pf_init.flags = IFNET_INIT_LEGACY;
	pf_init.name = ifc->ifc_name;
	pf_init.unit = unit;
	pf_init.type = IFT_PFLOG;
	pf_init.family = IFNET_FAMILY_LOOPBACK;
	pf_init.output = pflogoutput;
	pf_init.demux = pflogdemux;
	pf_init.add_proto = pflogaddproto;
	pf_init.del_proto = pflogdelproto;
	pf_init.softc = pflogif;
	pf_init.ioctl = pflogioctl;
	pf_init.detach = pflogfree;

	bzero(pflogif, sizeof (*pflogif));
	pflogif->sc_unit = unit;
	pflogif->sc_flags |= IFPFLF_DETACHING;

	error = ifnet_allocate_extended(&pf_init, &pflogif->sc_if);
	if (error != 0) {
		printf("%s: ifnet_allocate failed - %d\n", __func__, error);
		if_clone_softc_deallocate(&pflog_cloner, pflogif);
		goto done;
	}

	ifnet_set_mtu(pflogif->sc_if, PFLOGMTU);
	ifnet_set_flags(pflogif->sc_if, IFF_UP, IFF_UP);

	error = ifnet_attach(pflogif->sc_if, NULL);
	if (error != 0) {
		printf("%s: ifnet_attach failed - %d\n", __func__, error);
		ifnet_release(pflogif->sc_if);
		if_clone_softc_deallocate(&pflog_cloner, pflogif);
		goto done;
	}

#if NBPFILTER > 0
	bpfattach(pflogif->sc_if, DLT_PFLOG, PFLOG_HDRLEN);
#endif

	lck_rw_lock_shared(pf_perim_lock);
	lck_mtx_lock(pf_lock);
	LIST_INSERT_HEAD(&pflogif_list, pflogif, sc_list);
	pflogifs[unit] = pflogif->sc_if;
	pflogif->sc_flags &= ~IFPFLF_DETACHING;
	lck_mtx_unlock(pf_lock);
	lck_rw_done(pf_perim_lock);

done:
	return (error);
}
Example #9
kern_return_t
dtrace_user_probe(arm_saved_state_t *regs, unsigned int instr)
{
	/*
	 * FIXME
	 *
	 * The only call path into this method is always a user trap.
	 * We don't need to test for user trap, but should assert it.
	 */

	lck_rw_t *rwp;
	struct proc *p = current_proc();

	uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());

	kauth_cred_uthread_update(uthread, p);

	if (((regs->cpsr & PSR_TF) && ((uint16_t) instr) == FASTTRAP_THUMB_RET_INSTR) ||
	    ((uint32_t) instr == FASTTRAP_ARM_RET_INSTR)) {
		uint8_t step = uthread->t_dtrace_step;
		uint8_t ret = uthread->t_dtrace_ret;
		user_addr_t npc = uthread->t_dtrace_npc;

		if (uthread->t_dtrace_ast) {
			printf("dtrace_user_probe() should be calling aston()\n");
			// aston(thread);
			// uthread->t_sig_check = 1;
		}

		/*
		 * Clear all user tracing flags.
		 */
		uthread->t_dtrace_ft = 0;

		/*
		 * If we weren't expecting to take a return probe trap, kill
		 * the process as though it had just executed an unassigned
		 * trap instruction.
		 */
		if (step == 0) {
			/*
			 * APPLE NOTE: We're returning KERN_FAILURE, which causes 
			 * the generic signal handling code to take over, which will effectively
			 * deliver an EXC_BAD_INSTRUCTION to the user process.
			 */
			return KERN_FAILURE;
		} 

		/*
		 * If we hit this trap unrelated to a return probe, we're
		 * just here to reset the AST flag since we deferred a signal
		 * until after we logically single-stepped the instruction we
		 * copied out.
		 */
		if (ret == 0) {
			regs->pc = npc;
			return KERN_SUCCESS;
		}

		/*
		 * We need to wait until after we've called the
		 * dtrace_return_probe_ptr function pointer to step the pc.
		 */
		rwp = &CPU->cpu_ft_lock;
		lck_rw_lock_shared(rwp);

		if (dtrace_return_probe_ptr != NULL)
			(void) (*dtrace_return_probe_ptr)(regs);
		lck_rw_unlock_shared(rwp);

		regs->pc = npc;

		return KERN_SUCCESS;
	} else {
		rwp = &CPU->cpu_ft_lock;

		/*
		 * The DTrace fasttrap provider uses a trap,
		 * FASTTRAP_{ARM,THUMB}_INSTR. We let
		 * DTrace take the first crack at handling
		 * this trap; if it's not a probe that DTrace knows about,
		 * we call into the trap() routine to handle it like a
		 * breakpoint placed by a conventional debugger.
		 */

		/*
		 * APPLE NOTE: I believe the purpose of the reader/writers lock
		 * is this: there are times when dtrace needs to prevent calling
		 * dtrace_pid_probe_ptr(). Sun's original impl grabbed a plain
		 * mutex here. However, that serialized all probe calls, and
		 * destroyed MP behavior. So now they use a RW lock, with probes
		 * as readers, and the top level synchronization as a writer.
		 */
		lck_rw_lock_shared(rwp);
		if (dtrace_pid_probe_ptr != NULL &&
		    (*dtrace_pid_probe_ptr)(regs) == 0) {
			lck_rw_unlock_shared(rwp);
			return KERN_SUCCESS;
		}
		lck_rw_unlock_shared(rwp);

		/*
		 * If the instruction that caused the breakpoint trap doesn't
		 * look like our trap anymore, it may be that this tracepoint
		 * was removed just after the user thread executed it. In
		 * that case, return to user land to retry the instruction.
		 *
		 * Note that the PC points to the instruction that caused the fault.
		 */
		if (regs->cpsr & PSR_TF) {
			uint16_t instr_check;
			if (fuword16(regs->pc, &instr_check) == 0 && instr_check != FASTTRAP_THUMB_INSTR) {
				return KERN_SUCCESS;
			}
		} else {
			uint32_t instr_check;
			if (fuword32(regs->pc, &instr_check) == 0 && instr_check != FASTTRAP_ARM_INSTR) {
				return KERN_SUCCESS;
			}
		}
	}

	return KERN_FAILURE;
}
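The APPLE NOTE above describes the reader side of cpu_ft_lock: each probe firing takes the lock shared so probes on different CPUs can run concurrently, while the top-level fasttrap synchronization takes it exclusive to briefly fence out all probe calls. A hedged sketch of what that writer side presumably looks like (the function name and the state being updated are assumptions, and the real code would have to do this for every CPU's lock):

static void
fasttrap_quiesce_probes(void)	/* hypothetical top-level synchronization */
{
	lck_rw_t *rwp = &CPU->cpu_ft_lock;

	lck_rw_lock_exclusive(rwp);	/* drains in-flight probe calls (readers) */
	/* ... safely update dtrace_pid_probe_ptr / tracepoint state ... */
	lck_rw_unlock_exclusive(rwp);
}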
Example #10
/**
 * ntfs_pagein - read a range of pages into memory
 * @ni:		ntfs inode whose data to read into the page range
 * @attr_ofs:	byte offset in the inode at which to start
 * @size:	number of bytes to read from the inode
 * @upl:	page list describing destination page range
 * @upl_ofs:	byte offset into page list at which to start
 * @flags:	flags further describing the pagein request
 *
 * Read @size bytes from the ntfs inode @ni, starting at byte offset @attr_ofs
 * into the inode, into the range of pages specified by the page list @upl,
 * starting at byte offset @upl_ofs into the page list.
 *
 * The @flags further describe the pagein request.  The following pagein flags
 * are currently defined in OSX kernel:
 *	UPL_IOSYNC	- Perform synchronous i/o.
 *	UPL_NOCOMMIT	- Do not commit/abort the page range.
 *	UPL_NORDAHEAD	- Do not perform any speculative read-ahead.
 *	IO_PASSIVE	- This is background i/o so do not throttle other i/o.
 *
 * Inside the ntfs driver we have the need to perform pageins whilst the inode
 * is locked for writing (@ni->lock) thus we cheat and set UPL_NESTED_PAGEOUT
 * in @flags when this is the case.  We make sure to clear it in @flags before
 * calling into the cluster layer so we do not accidentally cause confusion.
 *
 * For encrypted attributes we abort for now as we do not support them yet.
 *
 * For non-resident, non-compressed attributes we use cluster_pagein_ext()
 * which deals with both normal and multi sector transfer protected attributes.
 *
 * For resident attributes and non-resident, compressed attributes we read the
 * data ourselves by mapping the page list, and in the resident case, mapping
 * the mft record, looking up the attribute in it, and copying the requested
 * data from the mapped attribute into the page list, then unmapping the mft
 * record, whilst for non-resident, compressed attributes, we get the raw inode
 * and use it with ntfs_read_compressed() to read and decompress the data into
 * our mapped page list.  We then unmap the page list and finally, if
 * UPL_NOCOMMIT is not specified, we commit (success) or abort (error) the page
 * range.
 *
 * Return 0 on success and errno on error.
 *
 * Note the pages in the page list are marked busy on entry and the busy bit is
 * cleared when we commit the page range.  Thus it is perfectly safe for us to
 * fill the pages with encrypted or mst protected data and to decrypt or mst
 * deprotect in place before committing the page range.
 *
 * Adapted from cluster_pagein_ext().
 *
 * Locking: - Caller must hold an iocount reference on the vnode of @ni.
 *	    - Caller must not hold @ni->lock or if it is held it must be for
 *	      reading unless UPL_NESTED_PAGEOUT is set in @flags in which case
 *	      the caller must hold @ni->lock for reading or writing.
 */
int ntfs_pagein(ntfs_inode *ni, s64 attr_ofs, unsigned size, upl_t upl,
		upl_offset_t upl_ofs, int flags)
{
	s64 attr_size;
	u8 *kaddr;
	kern_return_t kerr;
	unsigned to_read;
	int err;
	BOOL locked = FALSE;

	ntfs_debug("Entering for mft_no 0x%llx, offset 0x%llx, size 0x%x, "
			"pagein flags 0x%x, page list offset 0x%llx.",
			(unsigned long long)ni->mft_no,
			(unsigned long long)attr_ofs, size, flags,
			(unsigned long long)upl_ofs);
	/*
	 * If the caller did not specify any i/o, then we are done.  We cannot
	 * issue an abort because we do not have a upl or we do not know its
	 * size.
	 */
	if (!upl) {
		ntfs_error(ni->vol->mp, "NULL page list passed in (error "
				"EINVAL).");
		return EINVAL;
	}
	if (S_ISDIR(ni->mode)) {
		ntfs_error(ni->vol->mp, "Called for directory vnode.");
		err = EISDIR;
		goto err;
	}
	/*
	 * Protect against changes in initialized_size and thus against
	 * truncation also unless UPL_NESTED_PAGEOUT is set in which case the
	 * caller has already taken @ni->lock for exclusive access.  We simply
	 * leave @locked to be FALSE in this case so we do not try to drop the
	 * lock later on.
	 *
	 * If UPL_NESTED_PAGEOUT is set we clear it in @flags to ensure we do
	 * not cause confusion in the cluster layer or the VM.
	 */
	if (flags & UPL_NESTED_PAGEOUT)
		flags &= ~UPL_NESTED_PAGEOUT;
	else {
		locked = TRUE;
		lck_rw_lock_shared(&ni->lock);
	}
	/* Do not allow messing with the inode once it has been deleted. */
	if (NInoDeleted(ni)) {
		/* Remove the inode from the name cache. */
		cache_purge(ni->vn);
		err = ENOENT;
		goto err;
	}
retry_pagein:
	/*
	 * We guarantee that the size in the ubc will be smaller or equal to
	 * the size in the ntfs inode thus no need to check @ni->data_size.
	 */
	attr_size = ubc_getsize(ni->vn);
	/*
	 * Only $DATA attributes can be encrypted/compressed.  Index root can
	 * have the flags set but this means to create compressed/encrypted
	 * files, not that the attribute is compressed/encrypted.  Note we need
	 * to check for AT_INDEX_ALLOCATION since this is the type of directory
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* TODO: Deny access to encrypted attributes, just like NT4. */
		if (NInoEncrypted(ni)) {
			if (ni->type != AT_DATA)
				panic("%s(): Encrypted non-data attribute.\n",
						__FUNCTION__);
			ntfs_warning(ni->vol->mp, "Denying access to "
					"encrypted attribute (EACCES).");
			err = EACCES;
			goto err;
		}
		/* Compressed data streams need special handling. */
		if (NInoNonResident(ni) && NInoCompressed(ni) && !NInoRaw(ni)) {
			if (ni->type != AT_DATA)
				panic("%s(): Compressed non-data attribute.\n",
						__FUNCTION__);
			goto compressed;
		}
	}
	/* NInoNonResident() == NInoIndexAllocPresent() */
	if (NInoNonResident(ni)) {
		int (*callback)(buf_t, void *);

		callback = NULL;
		if (NInoMstProtected(ni) || NInoEncrypted(ni))
			callback = ntfs_cluster_iodone;
		/* Non-resident, possibly mst protected, attribute. */
		err = cluster_pagein_ext(ni->vn, upl, upl_ofs, attr_ofs, size,
				attr_size, flags, callback, NULL);
		if (!err)
			ntfs_debug("Done (cluster_pagein_ext()).");
		else
			ntfs_error(ni->vol->mp, "Failed (cluster_pagein_ext(), "
					"error %d).", err);
		if (locked)
			lck_rw_unlock_shared(&ni->lock);
		return err;
	}
compressed:
	/*
	 * The attribute is resident and/or compressed.
	 *
	 * Cannot pagein from a negative offset or if we are starting beyond
	 * the end of the attribute or if the attribute offset is not page
	 * aligned or the size requested is not a multiple of PAGE_SIZE.
	 */
	if (attr_ofs < 0 || attr_ofs >= attr_size || attr_ofs & PAGE_MASK_64 ||
			size & PAGE_MASK || upl_ofs & PAGE_MASK) {
		err = EINVAL;
		goto err;
	}
	to_read = size;
	attr_size -= attr_ofs;
	if (to_read > attr_size)
		to_read = attr_size;
	/*
	 * We do not need @attr_size any more so reuse it to hold the number of
	 * bytes available in the attribute starting at offset @attr_ofs up to
	 * a maximum of the requested number of bytes rounded up to a multiple
	 * of the system page size.
	 */
	attr_size = (to_read + PAGE_MASK) & ~PAGE_MASK;
	/* Abort any pages outside the end of the attribute. */
	if (size > attr_size && !(flags & UPL_NOCOMMIT)) {
		ubc_upl_abort_range(upl, upl_ofs + attr_size, size - attr_size,
				UPL_ABORT_FREE_ON_EMPTY | UPL_ABORT_ERROR);
		/* Update @size. */
		size = attr_size;
	}
	/* To access the page list contents, we need to map the page list. */
	kerr = ubc_upl_map(upl, (vm_offset_t*)&kaddr);
	if (kerr != KERN_SUCCESS) {
		ntfs_error(ni->vol->mp, "ubc_upl_map() failed (error %d).",
				(int)kerr);
		err = EIO;
		goto err;
	}
	if (!NInoNonResident(ni)) {
		/*
		 * Read the data from the resident attribute into the page
		 * list.
		 */
		err = ntfs_resident_attr_read(ni, attr_ofs, size,
				kaddr + upl_ofs);
		if (err && err != EAGAIN)
			ntfs_error(ni->vol->mp, "ntfs_resident_attr_read() "
					"failed (error %d).", err);
	} else {
		ntfs_inode *raw_ni;
		int ioflags;

		/*
		 * Get the raw inode.  We take the inode lock shared to protect
		 * against concurrent writers as the compressed data is invalid
		 * whilst a write is in progress.
		 */
		err = ntfs_raw_inode_get(ni, LCK_RW_TYPE_SHARED, &raw_ni);
		if (err)
			ntfs_error(ni->vol->mp, "Failed to get raw inode "
					"(error %d).", err);
		else {
			if (!NInoRaw(raw_ni))
				panic("%s(): Requested raw inode but got "
						"non-raw one.\n", __FUNCTION__);
			ioflags = 0;
			if (vnode_isnocache(ni->vn) ||
					vnode_isnocache(raw_ni->vn))
				ioflags |= IO_NOCACHE;
			if (vnode_isnoreadahead(ni->vn) ||
					vnode_isnoreadahead(raw_ni->vn))
				ioflags |= IO_RAOFF;
			err = ntfs_read_compressed(ni, raw_ni, attr_ofs, size,
					kaddr + upl_ofs, NULL, ioflags);
			if (err)
				ntfs_error(ni->vol->mp,
						"ntfs_read_compressed() "
						"failed (error %d).", err);
			lck_rw_unlock_shared(&raw_ni->lock);
			(void)vnode_put(raw_ni->vn);
		}
	}
	kerr = ubc_upl_unmap(upl);
	if (kerr != KERN_SUCCESS) {
		ntfs_error(ni->vol->mp, "ubc_upl_unmap() failed (error %d).",
				(int)kerr);
		if (!err)
			err = EIO;
	}
	if (!err) {
		if (!(flags & UPL_NOCOMMIT)) {
			/* Commit the page range we brought up to date. */
			ubc_upl_commit_range(upl, upl_ofs, size,
					UPL_COMMIT_FREE_ON_EMPTY);
		}
		ntfs_debug("Done (%s).", !NInoNonResident(ni) ?
				"ntfs_resident_attr_read()" :
				"ntfs_read_compressed()");
	} else /* if (err) */ {
		/*
		 * If the attribute was converted to non-resident under our
		 * nose, retry the pagein.
		 *
		 * TODO: This may no longer be possible to happen now that we
		 * lock against changes in initialized size and thus
		 * truncation...  Revisit this issue when the write code has
		 * been written and remove the check + goto if appropriate.
		 */
		if (err == EAGAIN)
			goto retry_pagein;
err:
		if (!(flags & UPL_NOCOMMIT)) {
			int upl_flags = UPL_ABORT_FREE_ON_EMPTY;
			if (err != ENOMEM)
				upl_flags |= UPL_ABORT_ERROR;
			ubc_upl_abort_range(upl, upl_ofs, size, upl_flags);
		}
		ntfs_error(ni->vol->mp, "Failed (error %d).", err);
	}
	if (locked)
		lck_rw_unlock_shared(&ni->lock);
	return err;
}
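The UPL_NESTED_PAGEOUT convention described in the header comment lets a caller that already holds ni->lock for writing call ntfs_pagein() without deadlocking: the caller sets the flag, and ntfs_pagein() then skips taking the lock itself. A hedged sketch of such a caller (the upl creation is elided and the surrounding variables are assumptions):

	lck_rw_lock_exclusive(&ni->lock);	/* caller owns the inode lock for writing */
	/* ... build a upl covering [attr_ofs, attr_ofs + size) ... */
	err = ntfs_pagein(ni, attr_ofs, size, upl, 0 /* upl_ofs */,
			flags | UPL_NESTED_PAGEOUT);	/* do not retake ni->lock */
	lck_rw_unlock_exclusive(&ni->lock);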
Example #11
kern_return_t
dtrace_user_probe(x86_saved_state_t *regs)
{
    x86_saved_state64_t *regs64;
    x86_saved_state32_t *regs32;
    int trapno;

    /*
     * FIXME!
     *
     * The only call path into this method is always a user trap.
     * We don't need to test for user trap, but should assert it.
     */
    boolean_t user_mode = TRUE;

    if (is_saved_state64(regs) == TRUE) {
        regs64 = saved_state64(regs);
        regs32 = NULL;
        trapno = regs64->isf.trapno;
        user_mode = TRUE; // By default, because xnu is 32 bit only
    } else {
        regs64 = NULL;
        regs32 = saved_state32(regs);
        if (regs32->cs & 0x03) user_mode = TRUE;
        trapno = regs32->trapno;
    }

    lck_rw_t *rwp;
    struct proc *p = current_proc();

    uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
    if (user_mode /*|| (rp->r_ps & PS_VM)*/) {
        /*
         * DTrace accesses t_cred in probe context.  t_cred
         * must always be either NULL, or point to a valid,
         * allocated cred structure.
         */
        kauth_cred_uthread_update(uthread, p);
    }

    if (trapno == T_DTRACE_RET) {
        uint8_t step = uthread->t_dtrace_step;
        uint8_t ret = uthread->t_dtrace_ret;
        user_addr_t npc = uthread->t_dtrace_npc;

        if (uthread->t_dtrace_ast) {
            printf("dtrace_user_probe() should be calling aston()\n");
            // aston(uthread);
            // uthread->t_sig_check = 1;
        }

        /*
         * Clear all user tracing flags.
         */
        uthread->t_dtrace_ft = 0;

        /*
         * If we weren't expecting to take a return probe trap, kill
         * the process as though it had just executed an unassigned
         * trap instruction.
         */
        if (step == 0) {
            /*
             * APPLE NOTE: We're returning KERN_FAILURE, which causes
             * the generic signal handling code to take over, which will effectively
             * deliver an EXC_BAD_INSTRUCTION to the user process.
             */
            return KERN_FAILURE;
        }

        /*
         * If we hit this trap unrelated to a return probe, we're
         * just here to reset the AST flag since we deferred a signal
         * until after we logically single-stepped the instruction we
         * copied out.
         */
        if (ret == 0) {
            if (regs64) {
                regs64->isf.rip = npc;
            } else {
                regs32->eip = npc;
            }
            return KERN_SUCCESS;
        }

        /*
         * We need to wait until after we've called the
         * dtrace_return_probe_ptr function pointer to set %pc.
         */
        rwp = &CPU->cpu_ft_lock;
        lck_rw_lock_shared(rwp);

        if (dtrace_return_probe_ptr != NULL)
            (void) (*dtrace_return_probe_ptr)(regs);
        lck_rw_unlock_shared(rwp);

        if (regs64) {
            regs64->isf.rip = npc;
        } else {
            regs32->eip = npc;
        }

        return KERN_SUCCESS;
    } else if (trapno == T_INT3) {
        uint8_t instr;
        rwp = &CPU->cpu_ft_lock;

        /*
         * The DTrace fasttrap provider uses the breakpoint trap
         * (int 3). We let DTrace take the first crack at handling
         * this trap; if it's not a probe that DTrace knows about,
         * we call into the trap() routine to handle it like a
         * breakpoint placed by a conventional debugger.
         */

        /*
         * APPLE NOTE: I believe the purpose of the reader/writers lock
         * is this: there are times when dtrace needs to prevent calling
         * dtrace_pid_probe_ptr(). Sun's original impl grabbed a plain
         * mutex here. However, that serialized all probe calls, and
         * destroyed MP behavior. So now they use a RW lock, with probes
         * as readers, and the top level synchronization as a writer.
         */
        lck_rw_lock_shared(rwp);
        if (dtrace_pid_probe_ptr != NULL &&
                (*dtrace_pid_probe_ptr)(regs) == 0) {
            lck_rw_unlock_shared(rwp);
            return KERN_SUCCESS;
        }
        lck_rw_unlock_shared(rwp);


        /*
         * If the instruction that caused the breakpoint trap doesn't
         * look like an int 3 anymore, it may be that this tracepoint
         * was removed just after the user thread executed it. In
         * that case, return to user land to retry the instruction.
         */
        user_addr_t pc = (regs64) ? regs64->isf.rip : (user_addr_t)regs32->eip;
        if (fuword8(pc - 1, &instr) == 0 && instr != FASTTRAP_INSTR) {
            if (regs64) {
                regs64->isf.rip--;
            } else {
                regs32->eip--;
            }
            return KERN_SUCCESS;
        }

    }

    return KERN_FAILURE;
}
Example #12
void
lock_read_EXT(
	lck_rw_t	*lock)
{
	lck_rw_lock_shared( lock);
}
Example #13
struct lpxpcb * Lpx_PCB_lookup( struct lpx_addr *faddr,	// peer addr:port 
								struct lpx_addr *laddr,	// local addr:port 
								int wildp,
								struct lpxpcb	*head
								)
{
    register struct lpxpcb *lpxp, *match = NULL;
    int matchwild = 3, wildcard = 0;	/* matching cost */
    u_short fport;
	u_short lport;

//	DEBUG_PRINT(DEBUG_MASK_PCB_TRACE, ("Lpx_PCB_lookup\n"));
	
    /* Check arguments */
    if (NULL == faddr
		|| NULL == head) {
		return (NULL);
    }
	
	fport = faddr->x_port;
	lport = laddr->x_port;
	
//    DEBUG_PRINT(DEBUG_MASK_PCB_INFO, ("PCBLU>> lpxpcb@(%lx):lpx_next@(%lx)  %02x:%02x:%02x:%02x:%02x:%02x faddr->xport = %d, lport = %d wildp = %d\n",&lpxpcb, lpxpcb.lpxp_next, faddr->x_node[0],faddr->x_node[1],faddr->x_node[2],faddr->x_node[3],faddr->x_node[4],faddr->x_node[5], faddr->x_port, lport, wildp));
	
	lck_rw_lock_shared(head->lpxp_list_rw);
	
    for (lpxp = (head)->lpxp_next; lpxp != (head); lpxp = lpxp->lpxp_next) {
		
 //       DEBUG_PRINT(DEBUG_MASK_PCB_INFO, ("PCBLU>> lpxp@(%lx) %02x:%02x:%02x:%02x:%02x:%02x lpxp_lport = %d, lpxp_fport = %d \n", lpxp, lpxp->lpxp_faddr.x_node[0], lpxp->lpxp_faddr.x_node[1], lpxp->lpxp_faddr.x_node[2], lpxp->lpxp_faddr.x_node[3], lpxp->lpxp_faddr.x_node[4], lpxp->lpxp_faddr.x_node[5], lpxp->lpxp_lport, lpxp->lpxp_fport));
		
		if (lpxp->lpxp_lport == lport) {	/* at least local port number should match */
			wildcard = 0;
			
			if (lpx_nullhost(lpxp->lpxp_faddr)) {	/* no peer address registered : INADDR_ANY */
				if (!lpx_nullhost(*faddr)) {
					if (lpx_hosteq(lpxp->lpxp_laddr, *laddr)) {
//						DEBUG_PRINT(DEBUG_MASK_PCB_INFO, ("Maybe broadcast packet.PCBLU>> lpxp@(%lx) %02x:%02x:%02x:%02x:%02x:%02x lpxp_lport = %d, lpxp_fport = %d \n", lpxp, lpxp->lpxp_laddr.x_node[0], lpxp->lpxp_laddr.x_node[1], lpxp->lpxp_laddr.x_node[2], lpxp->lpxp_laddr.x_node[3], lpxp->lpxp_laddr.x_node[4], lpxp->lpxp_laddr.x_node[5], lpxp->lpxp_lport, lpxp->lpxp_fport));
					} else {
						wildcard++;
					}
				}
			} else {				/* peer address is in the DB */
				if (lpx_nullhost(*faddr)) {
					wildcard++;
				} else {
					if (!lpx_hosteq(lpxp->lpxp_faddr, *faddr)) {/* peer address doesn't match */
						continue;
					}
					if (lpxp->lpxp_fport != fport) {
						if (lpxp->lpxp_fport != 0) {		/* peer port number doesn't match */
							continue;
						} else {
							wildcard++;
						}
					}
				}
			}

	//		DEBUG_PRINT(DEBUG_MASK_PCB_INFO, ("PCBLU>> matchwild = %d, wildcard = %d\n",matchwild, wildcard));

			if (wildcard && (wildp == 0)) {	/* skip wildcard matches unless the caller allows them */
				continue;
			}

			if (wildcard < matchwild) {
				match = lpxp;
				matchwild = wildcard;
				
				if (wildcard == 0) {		/* exact match : all done */
					break;
				}
			}
		} /* if (lpxp->lpxp_lport == lport) */
    }	/* for (lpxp = ...) ... */
	
	lck_rw_unlock_shared(head->lpxp_list_rw);

	//DEBUG_PRINT(DEBUG_MASK_PCB_INFO, ("PCBLU>> match@(%lx) matchwild = %d, wildcard = %d\n", match, matchwild, wildcard));
	
	if (match) {
//		DEBUG_PRINT(DEBUG_MASK_PCB_INFO, ("PCBLU>> match@(%lx) matchwild = %d, wildcard = %d\n", match, matchwild, wildcard));
	}
	
	return (match);
}