Example #1
static errno_t
ipf_add(
	const struct ipf_filter* filter,
	ipfilter_t *filter_ref,
	struct ipfilter_list *head)
{
	struct ipfilter	*new_filter;
	if (filter->name == NULL || (filter->ipf_input == NULL && filter->ipf_output == NULL))
		return EINVAL;
	
	MALLOC(new_filter, struct ipfilter*, sizeof(*new_filter), M_IFADDR, M_WAITOK);
	if (new_filter == NULL)
		return ENOMEM;
	
	lck_mtx_lock(kipf_lock);
	new_filter->ipf_filter = *filter;
	new_filter->ipf_head = head;
	
	TAILQ_INSERT_HEAD(head, new_filter, ipf_link);
	
	lck_mtx_unlock(kipf_lock);
	
	*filter_ref = (ipfilter_t)new_filter;

	/* This will force TCP to re-evaluate its use of TSO */
	OSAddAtomic(1, &kipf_count);
	routegenid_update();

	return 0;
}
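For context, here is a hedged sketch of how a kext would normally reach this routine through the public KPI in <netinet/kpi_ipfilter.h>; ipf_addv4() is the wrapper that supplies the IPv4 filter list, and the callback bodies, cookie, and filter name below are illustrative.

/* Sketch: registering an IPv4 filter via the public KPI (illustrative callbacks). */
#include <netinet/kpi_ipfilter.h>

static ipfilter_t g_filter_ref;

static errno_t
my_ipf_input(void *cookie, mbuf_t *data, int offset, u_int8_t protocol)
{
	return 0;	/* 0 lets the packet continue unchanged */
}

static void
my_ipf_detach(void *cookie)
{
	/* release any per-filter state here */
}

static errno_t
register_my_filter(void)
{
	struct ipf_filter filter = {
		.cookie     = NULL,
		.name       = "com.example.myfilter",	/* illustrative */
		.ipf_input  = my_ipf_input,
		.ipf_output = NULL,
		.ipf_detach = my_ipf_detach,
	};

	/* ipf_addv4() ends up in ipf_add() with the IPv4 filter list. */
	return ipf_addv4(&filter, &g_filter_ref);
}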
Example #2
int
fuse_filehandle_put(vnode_t vp, vfs_context_t context, fufh_type_t fufh_type)
{
    struct fuse_dispatcher  fdi;
    struct fuse_release_in *fri;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh  = NULL;

    int err   = 0;
    int isdir = 0;
    int op    = FUSE_RELEASE;

    const bool wait_for_completion = true;

    fuse_trace_printf("fuse_filehandle_put(vp=%p, fufh_type=%d)\n",
                      vp, fufh_type);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("fuse4x: filehandle_put called on a valid fufh (type=%d)",
              fufh_type);
        /* NOTREACHED */
    }

    if (fuse_isdeadfs(vp)) {
        goto out;
    }

    if (vnode_isdir(vp)) {
        op = FUSE_RELEASEDIR;
        isdir = 1;
    }

    fdisp_init(&fdi, sizeof(*fri));
    fdisp_make_vp(&fdi, op, vp, context);
    fri = fdi.indata;
    fri->fh = fufh->fh_id;
    fri->flags = fufh->open_flags;

    if (wait_for_completion) {
        if ((err = fdisp_wait_answ(&fdi))) {
            goto out;
        } else {
            fuse_ticket_drop(fdi.tick);
        }
    } else {
        fuse_insert_callback(fdi.tick, NULL);
        fuse_insert_message(fdi.tick);
    }

out:
    OSAddAtomic(-1, (SInt32 *)&fuse_fh_current);
    fuse_invalidate_attr(vp);

    return err;
}
Example #3
void
kern_os_free(void * addr)
{
    size_t size;
    size = kalloc_size(addr);
#if OSALLOCDEBUG
    OSAddAtomic(-size, &debug_iomalloc_size);
#endif

    kfree_addr(addr);
}
Example #4
/*
 * Allocate kva for the pipe circular buffer; the space is pageable.
 * This routine 'realloc's the size of a pipe safely: if the allocation
 * fails, it retains the old buffer and returns ENOMEM.
 */
static int
pipespace(struct pipe *cpipe, int size)
{
	vm_offset_t buffer;

	if (size <= 0)
		return(EINVAL);

	if ((buffer = (vm_offset_t)kalloc(size)) == 0 )
		return(ENOMEM);

	/* free old resources if we're resizing */
	pipe_free_kmem(cpipe);
	cpipe->pipe_buffer.buffer = (caddr_t)buffer;
	cpipe->pipe_buffer.size = size;
	cpipe->pipe_buffer.in = 0;
	cpipe->pipe_buffer.out = 0;
	cpipe->pipe_buffer.cnt = 0;

	OSAddAtomic(1, &amountpipes);
	OSAddAtomic(cpipe->pipe_buffer.size, &amountpipekva);

	return (0);
}
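As a small illustration of the "retain the old buffer on failure" contract documented above, a hypothetical caller (not part of xnu) could attempt to grow a pipe like this:

static int
pipe_try_grow(struct pipe *cpipe, int newsize)
{
	int error = pipespace(cpipe, newsize);

	if (error == ENOMEM) {
		/* Allocation failed: the old buffer is still attached, so the
		 * pipe keeps working at its previous size. */
		return 0;
	}
	/* On success the buffer was replaced and its contents reset. */
	return error;
}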
Example #5
errno_t
ipf_remove(
	ipfilter_t filter_ref)
{
	struct ipfilter	*match = (struct ipfilter*)filter_ref;
	struct ipfilter_list *head;
	
	if (match == 0 || (match->ipf_head != &ipv4_filters && match->ipf_head != &ipv6_filters))
		return EINVAL;
	
	head = match->ipf_head;
	
	lck_mtx_lock(kipf_lock);
	TAILQ_FOREACH(match, head, ipf_link) {
		if (match == (struct ipfilter*)filter_ref) {
			ipf_detach_func ipf_detach = match->ipf_filter.ipf_detach;
			void* cookie = match->ipf_filter.cookie;
			
			/*
			 * Cannot detach while there are filters running
			 */
			if (kipf_ref) {
				kipf_delayed_remove++;
				TAILQ_INSERT_TAIL(&tbr_filters, match, ipf_tbr);
				match->ipf_filter.ipf_input = 0;
				match->ipf_filter.ipf_output = 0;
				lck_mtx_unlock(kipf_lock);
			} else {
				TAILQ_REMOVE(head, match, ipf_link);
				lck_mtx_unlock(kipf_lock);
				if (ipf_detach)
					ipf_detach(cookie);
				FREE(match, M_IFADDR);

				/* This will force TCP to re-evaluate its use of TSO */
				OSAddAtomic(-1, &kipf_count);
				if (use_routegenid)
					routegenid_update();

			}
			return 0;
		}
	}
	lck_mtx_unlock(kipf_lock);
	
	return ENOENT;
}
Example #6
void *
kern_os_malloc(size_t size)
{
    void *mem;
    if (size == 0) {
        return (0);
    }

    mem = kallocp_tag_bt((vm_size_t *)&size, VM_KERN_MEMORY_LIBKERN);
    if (!mem) {
        return (0);
    }

#if OSALLOCDEBUG
    OSAddAtomic(size, &debug_iomalloc_size);
#endif

    bzero(mem, size);

    return mem;
}
Example #7
void *
kern_os_realloc(
    void   * addr,
    size_t   nsize)
{
    void            *nmem;
    size_t          osize;

    if (!addr) {
        return (kern_os_malloc(nsize));
    }

    osize = kalloc_size(addr);
    if (nsize == osize) {
        return (addr);
    }

    if (nsize == 0) {
        kfree_addr(addr);
        return (0);
    }

    nmem = kallocp_tag_bt((vm_size_t *)&nsize, VM_KERN_MEMORY_LIBKERN);
    if (!nmem){
        kfree_addr(addr);
        return (0);
    }

#if OSALLOCDEBUG
    OSAddAtomic((nsize - osize), &debug_iomalloc_size);
#endif

    if (nsize > osize) {
        (void)memset((char *)nmem + osize, 0, nsize - osize);
    }
    (void)memcpy(nmem, addr, (nsize > osize) ? osize : nsize);
    kfree_addr(addr);

    return (nmem);
}
Example #8
/*
 * Because of the vagaries of how a filehandle can be used, we try not to
 * be too smart in here (we try to be smart elsewhere). It is required that
 * you come in here only if you really do not have the said filehandle--else
 * we panic.
 */
int
fuse_filehandle_get(vnode_t       vp,
                    vfs_context_t context,
                    fufh_type_t   fufh_type,
                    int           mode)
{
    struct fuse_dispatcher  fdi;
    struct fuse_open_in    *foi;
    struct fuse_open_out   *foo;
    struct fuse_filehandle *fufh;
    struct fuse_vnode_data *fvdat = VTOFUD(vp);

    int err    = 0;
    int isdir  = 0;
    int oflags = 0;
    int op     = FUSE_OPEN;

    fuse_trace_printf("fuse_filehandle_get(vp=%p, fufh_type=%d, mode=%x)\n",
                      vp, fufh_type, mode);

    fufh = &(fvdat->fufh[fufh_type]);

    if (FUFH_IS_VALID(fufh)) {
        panic("fuse4x: filehandle_get called despite valid fufh (type=%d)",
              fufh_type);
        /* NOTREACHED */
    }

    /*
     * Note that this means we are effectively FILTERING OUT open() flags.
     */
    (void)mode;
    oflags = fuse_filehandle_xlate_to_oflags(fufh_type);

    if (vnode_isdir(vp)) {
        isdir = 1;
        op = FUSE_OPENDIR;
        if (fufh_type != FUFH_RDONLY) {
            log("fuse4x: non-rdonly fufh requested for directory\n");
            fufh_type = FUFH_RDONLY;
        }
    }

    fdisp_init(&fdi, sizeof(*foi));
    fdisp_make_vp(&fdi, op, vp, context);

    if (vnode_islnk(vp) && (mode & O_SYMLINK)) {
        oflags |= O_SYMLINK;
    }

    foi = fdi.indata;
    foi->flags = oflags;

    OSAddAtomic(1, (SInt32 *)&fuse_fh_upcall_count);
    if ((err = fdisp_wait_answ(&fdi))) {
#if M_FUSE4X_ENABLE_UNSUPPORTED
        const char *vname = vnode_getname(vp);
#endif /* M_FUSE4X_ENABLE_UNSUPPORTED */
        if (err == ENOENT) {
            /*
             * See comment in fuse_vnop_reclaim().
             */
            cache_purge(vp);
        }
#if M_FUSE4X_ENABLE_UNSUPPORTED
        log("fuse4x: filehandle_get: failed for %s "
              "(type=%d, err=%d, caller=%p)\n",
              (vname) ? vname : "?", fufh_type, err,
               __builtin_return_address(0));
        if (vname) {
            vnode_putname(vname);
        }
#endif /* M_FUSE4X_ENABLE_UNSUPPORTED */
        if (err == ENOENT) {
#if M_FUSE4X_ENABLE_BIGLOCK
            struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
            fuse_biglock_unlock(data->biglock);
#endif
            fuse_internal_vnode_disappear(vp, context, REVOKE_SOFT);
#if M_FUSE4X_ENABLE_BIGLOCK
            fuse_biglock_lock(data->biglock);
#endif
        }
        return err;
    }
    OSAddAtomic(1, (SInt32 *)&fuse_fh_current);

    foo = fdi.answ;

    fufh->fh_id = foo->fh;
    fufh->open_count = 1;
    fufh->open_flags = oflags;
    fufh->fuse_open_flags = foo->open_flags;
    fufh->aux_count = 0;

    fuse_ticket_drop(fdi.tick);

    return 0;
}
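To make the precondition above concrete, here is a minimal sketch of the expected caller pattern; example_open_for_read is a hypothetical helper (not part of fuse4x), and the FREAD mode argument is illustrative.

static int
example_open_for_read(vnode_t vp, vfs_context_t context)
{
    struct fuse_vnode_data *fvdat = VTOFUD(vp);
    struct fuse_filehandle *fufh  = &(fvdat->fufh[FUFH_RDONLY]);

    if (FUFH_IS_VALID(fufh)) {
        /* Reuse the cached handle; calling fuse_filehandle_get() now would panic. */
        fufh->open_count++;
        return 0;
    }

    /* No valid handle of this type yet, so it is safe to ask the daemon for one. */
    return fuse_filehandle_get(vp, context, FUFH_RDONLY, FREAD);
}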
Example #9
SInt32	OSDecrementAtomic(volatile SInt32 * value)
{
	return OSAddAtomic(-1, value);
}
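The increment counterpart is built on the same primitive; the sketch below mirrors how libkern defines OSIncrementAtomic (treat the exact definition as an assumption).

SInt32	OSIncrementAtomic(volatile SInt32 * value)
{
	/* Atomically add 1 and return the previous value, like the decrement above. */
	return OSAddAtomic(1, value);
}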
Example #10
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits     flags         = _flags;
    IOOptionBits     internalFlags = _internalFlags;
    IOOptionBits     options   = _options;
    vm_size_t        size      = _capacity;
    void *           buffer    = _buffer;
    IOMemoryMap *    map       = 0;
    IOAddressRange * range     = _ranges.v64;
    vm_offset_t      alignment = _alignment;

    if (alignment >= page_size)
	size = round_page(size);

    if (reserved)
    {
	map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
	if (map)
	    map->release();
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (options & kIOMemoryPageable)
    {
#if IOALLOCDEBUG
	OSAddAtomicLong(-(round_page(size)), &debug_iomallocpageable_size);
#endif
    }
    else if (buffer)
    {
	if (kInternalFlagPageSized & internalFlags) size = round_page(size);

        if (kInternalFlagPhysical & internalFlags)
        {
            IOKernelFreePhysical((mach_vm_address_t) buffer, size);
	}
	else if (kInternalFlagPageAllocated & internalFlags)
	{
	    uintptr_t page;
            page = iopa_free(&gIOBMDPageAllocator, (uintptr_t) buffer, size);
	    if (page)
	    {
		kmem_free(kernel_map, page, page_size);
	    }
#if IOALLOCDEBUG
		OSAddAtomic(-size, &debug_iomalloc_size);
#endif
	    IOStatisticsAlloc(kIOStatisticsFreeAligned, size);
	}
        else if (alignment > 1)
	{
            IOFreeAligned(buffer, size);
	}
        else
	{
            IOFree(buffer, size);
	}
    }
    if (range && (kIOMemoryAsReference & flags))
	IODelete(range, IOAddressRange, 1);
}
Example #11
bool IOBufferMemoryDescriptor::initWithPhysicalMask(
				task_t		  inTask,
				IOOptionBits      options,
				mach_vm_size_t    capacity,
				mach_vm_address_t alignment,
				mach_vm_address_t physicalMask)
{
    task_t		  mapTask = NULL;
    vm_map_t 		  vmmap = NULL;
    mach_vm_address_t     highestMask = 0;
    IOOptionBits	  iomdOptions = kIOMemoryTypeVirtual64 | kIOMemoryAsReference;
    IODMAMapSpecification mapSpec;
    bool                  mapped = false;
    bool                  needZero;

    if (!capacity) return false;

    _options   	      = options;
    _capacity         = capacity;
    _internalFlags    = 0;
    _internalReserved = 0;
    _buffer	      = 0;

    _ranges.v64 = IONew(IOAddressRange, 1);
    if (!_ranges.v64)
	return (false);
    _ranges.v64->address = 0;
    _ranges.v64->length  = 0;
    //  make sure super::free doesn't dealloc _ranges before super::init
    _flags = kIOMemoryAsReference;

    // Grab IOMD bits from the Buffer MD options
    iomdOptions  |= (options & kIOBufferDescriptorMemoryFlags);

    if (!(kIOMemoryMapperNone & options))
    {
	IOMapper::checkForSystemMapper();
	mapped = (0 != IOMapper::gSystem);
    }
    needZero = (mapped || (0 != (kIOMemorySharingTypeMask & options)));

    if (physicalMask && (alignment <= 1))
    {
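	// For example, a physicalMask of 0xFFFFF000 has 12 trailing zero bits:
	// ~mask & (mask - 1) == 0xFFF, so after the increment below alignment
	// becomes 0x1000 (4 KB). In other words, the mask's trailing zeros
	// dictate the minimum allocation alignment.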
	alignment   = ((physicalMask ^ (-1ULL)) & (physicalMask - 1));
	highestMask = (physicalMask | alignment);
	alignment++;
	if (alignment < page_size)
            alignment = page_size;
    }

    if ((options & (kIOMemorySharingTypeMask | kIOMapCacheMask | kIOMemoryClearEncrypt)) && (alignment < page_size))
	alignment = page_size;

    if (alignment >= page_size)
	capacity = round_page(capacity);

    if (alignment > page_size)
	options |= kIOMemoryPhysicallyContiguous;

    _alignment = alignment;

    if ((capacity + alignment) < _capacity) return (false);

    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
	return false;

    bzero(&mapSpec, sizeof(mapSpec));
    mapSpec.alignment      = _alignment;
    mapSpec.numAddressBits = 64;
    if (highestMask && mapped)
    {
	if (highestMask <= 0xFFFFFFFF)
	    mapSpec.numAddressBits = (32 - __builtin_clz((unsigned int) highestMask));
	else
	    mapSpec.numAddressBits = (64 - __builtin_clz((unsigned int) (highestMask >> 32)));
	highestMask = 0;
    }

    // set memory entry cache mode, pageable, purgeable
    iomdOptions |= ((options & kIOMapCacheMask) >> kIOMapCacheShift) << kIOMemoryBufferCacheShift;
    if (options & kIOMemoryPageable)
    {
	iomdOptions |= kIOMemoryBufferPageable;
	if (options & kIOMemoryPurgeable) iomdOptions |= kIOMemoryBufferPurgeable;
    }
    else
    {
	vmmap = kernel_map;

	// Buffers shouldn't auto-prepare; they should be prepared explicitly.
	// But it never was enforced so what are you going to do?
	iomdOptions |= kIOMemoryAutoPrepare;

	/* Allocate a wired-down buffer inside kernel space. */

	bool contig = (0 != (options & kIOMemoryHostPhysicallyContiguous));

	if (!contig && (0 != (options & kIOMemoryPhysicallyContiguous)))
	{
	    contig |= (!mapped);
	    contig |= (0 != (kIOMemoryMapperNone & options));
#if 0
	    // treat kIOMemoryPhysicallyContiguous as kIOMemoryHostPhysicallyContiguous for now
	    contig |= true;
#endif
	}

	if (contig || highestMask || (alignment > page_size))
	{
            _internalFlags |= kInternalFlagPhysical;
            if (highestMask)
            {
                _internalFlags |= kInternalFlagPageSized;
                capacity = round_page(capacity);
            }
            _buffer = (void *) IOKernelAllocateWithPhysicalRestrict(
            				capacity, highestMask, alignment, contig);
	}
	else if (needZero
		  && ((capacity + alignment) <= (page_size - gIOPageAllocChunkBytes)))
	{
            _internalFlags |= kInternalFlagPageAllocated;
            needZero        = false;
            _buffer         = (void *) iopa_alloc(&gIOBMDPageAllocator, &IOBMDPageProc, capacity, alignment);
	    if (_buffer)
	    {
		IOStatisticsAlloc(kIOStatisticsMallocAligned, capacity);
#if IOALLOCDEBUG
		OSAddAtomic(capacity, &debug_iomalloc_size);
#endif
	    }
	}
	else if (alignment > 1)
	{
            _buffer = IOMallocAligned(capacity, alignment);
	}
	else
	{
            _buffer = IOMalloc(capacity);
	}
	if (!_buffer)
	{
            return false;
	}
	if (needZero) bzero(_buffer, capacity);
    }

    if( (options & (kIOMemoryPageable | kIOMapCacheMask))) {
	vm_size_t	size = round_page(capacity);

	// initWithOptions will create memory entry
	iomdOptions |= kIOMemoryPersistent;

	if( options & kIOMemoryPageable) {
#if IOALLOCDEBUG
	    OSAddAtomicLong(size, &debug_iomallocpageable_size);
#endif
	    mapTask = inTask;
	    if (NULL == inTask)
		inTask = kernel_task;
	}
	else if (options & kIOMapCacheMask)
	{
	    // Prefetch each page to put entries into the pmap
	    volatile UInt8 *	startAddr = (UInt8 *)_buffer;
	    volatile UInt8 *	endAddr   = (UInt8 *)_buffer + capacity;

	    while (startAddr < endAddr)
	    {
		UInt8 dummyVar = *startAddr;
		(void) dummyVar;
		startAddr += page_size;
 	    }
	}
    }

    _ranges.v64->address = (mach_vm_address_t) _buffer;
    _ranges.v64->length  = _capacity;

    if (!super::initWithOptions(_ranges.v64, 1, 0,
				inTask, iomdOptions, /* System mapper */ 0))
	return false;

    // give any system mapper the allocation params
    if (kIOReturnSuccess != dmaCommandOperation(kIOMDAddDMAMapSpec, 
    						&mapSpec, sizeof(mapSpec)))
	return false;

    if (mapTask)
    {
	if (!reserved) {
	    reserved = IONew( ExpansionData, 1 );
	    if( !reserved)
		return( false );
	}
	reserved->map = createMappingInTask(mapTask, 0, 
			    kIOMapAnywhere | (options & kIOMapPrefault) | (options & kIOMapCacheMask), 0, 0);
	if (!reserved->map)
	{
	    _buffer = 0;
	    return( false );
	}
	release();	    // map took a retain on this
	reserved->map->retain();
	removeMapping(reserved->map);
	mach_vm_address_t buffer = reserved->map->getAddress();
	_buffer = (void *) buffer;
	if (kIOMemoryTypeVirtual64 == (kIOMemoryTypeMask & iomdOptions))
	    _ranges.v64->address = buffer;
    }

    setLength(_capacity);

    return true;
}
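For context, a hedged sketch of how a driver typically reaches this initializer through the public factory; the capacity and physical mask below are illustrative, and the mask's trailing zero bits become the alignment computed above.

#include <IOKit/IOBufferMemoryDescriptor.h>

// Sketch: a 16 KB buffer that must sit below 4 GB and be 4 KB aligned
// (the alignment falls out of the mask's 12 trailing zero bits).
static IOBufferMemoryDescriptor *
allocateDeviceBuffer(void)
{
    IOBufferMemoryDescriptor *bmd = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(
        kernel_task,
        kIODirectionInOut | kIOMemoryPhysicallyContiguous,
        16 * 1024,                     // capacity
        0x00000000FFFFF000ULL);        // physicalMask
    if (bmd) {
        bzero(bmd->getBytesNoCopy(), 16 * 1024);   // wired kernel virtual address
        // When the driver is done, bmd->release() lands in ::free() shown above.
    }
    return bmd;
}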
Example #12
bool IOSharedEventQueue::EnqueueTracker(DataArgs * data)
{
    uint32_t singleTrackerLen = sizeof(DataArgs);
    const UInt32 head = dataQueue->head;
    const UInt32 tail = dataQueue->tail;

    LOG(LOG_DEBUG, "head=%d", dataQueue->head);
    LOG(LOG_DEBUG, "tail=%d", dataQueue->tail);

    const UInt32 entrySize = singleTrackerLen+DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry *entry;

    if(singleTrackerLen>UINT32_MAX-DATA_QUEUE_ENTRY_HEADER_SIZE)
    {
        return false;
    }

    LOG(LOG_DEBUG, "this->getQueueSize()=%d", this->getQueueSize());
    if(this->getQueueSize()<tail)
    {
        return false;
    }

    if(tail>=head)
    {
        if(entrySize<=UINT32_MAX-DATA_QUEUE_ENTRY_HEADER_SIZE &&
        tail+entrySize<=this->getQueueSize())
        {
            entry = (IODataQueueEntry*)((uint8_t*)dataQueue->queue+dataQueue->tail);
            entry->size=singleTrackerLen;
            memcpy(entry->data, data, singleTrackerLen);
            OSAddAtomic(entrySize, (SInt32*)&(dataQueue->tail));
        }
        else if (head > entrySize)
        {
            dataQueue->queue->size = singleTrackerLen;

            if ( ( getQueueSize() - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = singleTrackerLen;
            }

            memcpy(&dataQueue->queue->data, data, singleTrackerLen);
            OSCompareAndSwap(dataQueue->tail, entrySize, &dataQueue->tail);
        }
        else
        {
            return false;
        }
    }
    else
    {
        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = singleTrackerLen;
            memcpy(&entry->data, data, singleTrackerLen);
            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Send a notification when the queue transitions from empty to non-empty
    // (or was emptied while we were enqueueing), mirroring IODataQueue::enqueue().
    //if ( (this->_status&kSharedEventQueueNotifyWhenAddData) || ( head == tail ) || ( dataQueue->head == tail ))
    if ( ( head == tail ) || ( dataQueue->head == tail ) )
    {
        sendDataAvailableNotification();
    }

    return true;
}
Example #13
/*
 * Look for the request in the cache
 * If found then
 *    return action and optionally reply
 * else
 *    insert it in the cache
 *
 * The rules are as follows:
 * - if in progress, DROP the request
 * - if completed within DELAY of the current time, DROP it
 * - if completed longer ago, return REPLY if the reply was cached,
 *   otherwise return DOIT
 * Update/add the new request at the end of the lru list
 */
int
nfsrv_getcache(
    struct nfsrv_descript *nd,
    struct nfsrv_sock *slp,
    mbuf_t *mrepp)
{
    struct nfsrvcache *rp;
    struct nfsm_chain nmrep;
    struct sockaddr *saddr;
    int ret, error;

    /*
     * Don't cache recent requests for reliable transport protocols.
     * (Maybe we should for the case of a reconnect, but..)
     */
    if (!nd->nd_nam2)
        return (RC_DOIT);
    lck_mtx_lock(nfsrv_reqcache_mutex);
loop:
    for (rp = NFSRCHASH(nd->nd_retxid)->lh_first; rp != 0;
            rp = rp->rc_hash.le_next) {
        if (nd->nd_retxid == rp->rc_xid && nd->nd_procnum == rp->rc_proc &&
                netaddr_match(rp->rc_family, &rp->rc_haddr, nd->nd_nam)) {
            if ((rp->rc_flag & RC_LOCKED) != 0) {
                rp->rc_flag |= RC_WANTED;
                msleep(rp, nfsrv_reqcache_mutex, PZERO-1, "nfsrc", NULL);
                goto loop;
            }
            rp->rc_flag |= RC_LOCKED;
            /* If not at end of LRU chain, move it there */
            if (rp->rc_lru.tqe_next) {
                TAILQ_REMOVE(&nfsrv_reqcache_lruhead, rp, rc_lru);
                TAILQ_INSERT_TAIL(&nfsrv_reqcache_lruhead, rp, rc_lru);
            }
            if (rp->rc_state == RC_UNUSED)
                panic("nfsrv cache");
            if (rp->rc_state == RC_INPROG) {
                OSAddAtomic(1, &nfsstats.srvcache_inproghits);
                ret = RC_DROPIT;
            } else if (rp->rc_flag & RC_REPSTATUS) {
                OSAddAtomic(1, &nfsstats.srvcache_nonidemdonehits);
                nd->nd_repstat = rp->rc_status;
                error = nfsrv_rephead(nd, slp, &nmrep, 0);
                if (error) {
                    printf("nfsrv cache: reply alloc failed for nonidem request hit\n");
                    ret = RC_DROPIT;
                    *mrepp = NULL;
                } else {
                    ret = RC_REPLY;
                    *mrepp = nmrep.nmc_mhead;
                }
            } else if (rp->rc_flag & RC_REPMBUF) {
                OSAddAtomic(1, &nfsstats.srvcache_nonidemdonehits);
                error = mbuf_copym(rp->rc_reply, 0, MBUF_COPYALL, MBUF_WAITOK, mrepp);
                if (error) {
                    printf("nfsrv cache: reply copym failed for nonidem request hit\n");
                    ret = RC_DROPIT;
                } else {
                    ret = RC_REPLY;
                }
            } else {
                OSAddAtomic(1, &nfsstats.srvcache_idemdonehits);
                rp->rc_state = RC_INPROG;
                ret = RC_DOIT;
            }
            rp->rc_flag &= ~RC_LOCKED;
            if (rp->rc_flag & RC_WANTED) {
                rp->rc_flag &= ~RC_WANTED;
                wakeup(rp);
            }
            lck_mtx_unlock(nfsrv_reqcache_mutex);
            return (ret);
        }
    }
    OSAddAtomic(1, &nfsstats.srvcache_misses);
    if (nfsrv_reqcache_count < nfsrv_reqcache_size) {
        /* try to allocate a new entry */
        MALLOC(rp, struct nfsrvcache *, sizeof *rp, M_NFSD, M_WAITOK);
        if (rp) {
            bzero((char *)rp, sizeof *rp);
            nfsrv_reqcache_count++;
            rp->rc_flag = RC_LOCKED;
        }
    } else {
Example #14
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err      = 0;
    int mntopts  = 0;
    bool mounted = false;

    uint32_t max_read = ~0;

    size_t len;

    fuse_device_t      fdev = NULL;
    struct fuse_data  *data = NULL;
    fuse_mount_args    fusefs_args;
    struct vfsstatfs  *vfsstatfsp = vfs_statfs(mp);

#if M_FUSE4X_ENABLE_BIGLOCK
    lck_mtx_t         *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

    err = ENOTSUP;

#if M_FUSE4X_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_FUSE4X_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (*fusefs_args.fstypename) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if (typenamelen > FUSE_FSTYPENAME_MAXLEN) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 FUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if (!*fusefs_args.fsname)
        return EINVAL;

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
            (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if ((fusefs_args.init_timeout > FUSE_MAX_INIT_TIMEOUT) ||
            (fusefs_args.init_timeout < FUSE_MIN_INIT_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group, &is_member) != 0) || !is_member) {
            log("fuse4x: caller is not a member of fuse4x admin group. "
                "Either add user (id=%d) to group (id=%d), "
                "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
            return EPERM;
        }
        mntopts |= FSESS_ALLOW_ROOT;
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group, &is_member) != 0) || !is_member) {
                log("fuse4x: caller is not a member of fuse4x admin group. "
                    "Either add user (id=%d) to group (id=%d), "
                    "or set correct '" SYSCTL_FUSE4X_TUNABLES_ADMIN "' sysctl value.\n",
                    kauth_cred_getuid(kauth_cred_get()), fuse_admin_group);
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            log("fuse4x: cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'\n");
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
            (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        vfs_setflags(mp, MNT_LOCAL);
    }
    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        log("fuse4x: invalid device file (number=%d)\n", fusefs_args.rdev);
        return EINVAL;
    }

    fuse_lck_mtx_lock(fdev->mtx);

    data = fdev->data;

    if (!data) {
        fuse_lck_mtx_unlock(fdev->mtx);
        return ENXIO;
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->dataflags & FSESS_MOUNTED) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_lck_mtx_unlock(fdev->mtx);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENXIO;
        goto out;
    }

    data->dataflags |= FSESS_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("fuse4x: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
            kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_lck_mtx_unlock(fdev->mtx);
        err = EPERM;
        log("fuse4x: fuse daemon running by user_id=%d does not have privileges to mount on directory %s owned by user_id=%d\n",
            kauth_cred_getuid(data->daemoncred), vfsstatfsp->f_mntonname, kauth_cred_getuid(vfs_context_ucred(context)));
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec =  fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = NULL;
    }

    data->init_timeout.tv_sec = fusefs_args.init_timeout;
    data->init_timeout.tv_nsec = 0;

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname,
            MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_lck_mtx_unlock(fdev->mtx);

    /* Send a handshake message to the daemon. */
    fuse_send_init(data, context);

    struct vfs_attr vfs_attr;
    VFSATTR_INIT(&vfs_attr);
    // Our vfs_getattr() doesn't look at most *_IS_ACTIVE()'s
    err = fuse_vfsop_getattr(mp, &vfs_attr, context);
    if (!err) {
        vfsstatfsp->f_bsize  = vfs_attr.f_bsize;
        vfsstatfsp->f_iosize = vfs_attr.f_iosize;
        vfsstatfsp->f_blocks = vfs_attr.f_blocks;
        vfsstatfsp->f_bfree  = vfs_attr.f_bfree;
        vfsstatfsp->f_bavail = vfs_attr.f_bavail;
        vfsstatfsp->f_bused  = vfs_attr.f_bused;
        vfsstatfsp->f_files  = vfs_attr.f_files;
        vfsstatfsp->f_ffree  = vfs_attr.f_ffree;
        // vfsstatfsp->f_fsid already handled above
        vfsstatfsp->f_owner  = kauth_cred_getuid(data->daemoncred);
        vfsstatfsp->f_flags  = vfs_flags(mp);
        // vfsstatfsp->f_fstypename already handled above
        // vfsstatfsp->f_mntonname handled elsewhere
        // vfsstatfsp->f_mnfromname already handled above
        vfsstatfsp->f_fssubtype = data->fssubtype;
    }
    if (fusefs_args.altflags & FUSE_MOPT_BLOCKSIZE) {
        vfsstatfsp->f_bsize = data->blocksize;
    } else {
        //data->blocksize = vfsstatfsp->f_bsize;
    }
    if (fusefs_args.altflags & FUSE_MOPT_IOSIZE) {
        vfsstatfsp->f_iosize = data->iosize;
    } else {
        //data->iosize = (uint32_t)vfsstatfsp->f_iosize;
        vfsstatfsp->f_iosize = data->iosize;
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_lck_mtx_lock(fdev->mtx);
        data = fdev->data; /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->dataflags &= ~FSESS_MOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_FUSE4X_ENABLE_BIGLOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_lck_mtx_unlock(fdev->mtx);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
        (void)vnode_put(fuse_rootvp);
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_lck_mtx_lock(fdev->mtx);
    data = fdev->data; /* ...and again */
    if(data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_lck_mtx_unlock(fdev->mtx);
#endif

    return err;
}
Example #15
Boolean IODataQueue::enqueue(void * data, UInt32 dataSize)
{
    const UInt32       head      = dataQueue->head;  // volatile
    const UInt32       tail      = dataQueue->tail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    IODataQueueEntry * entry;

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }
    // Check for underflow of (dataQueue->queueSize - tail)
    if (dataQueue->queueSize < tail) {
        return false;
    }

    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= dataQueue->queueSize) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.
            
            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
        }
        else if ( head > entrySize )     // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ( ( dataQueue->queueSize - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            memcpy(&dataQueue->queue->data, data, dataSize);
            OSCompareAndSwap(dataQueue->tail, entrySize, &dataQueue->tail);
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            OSAddAtomic(entrySize, (SInt32 *)&dataQueue->tail);
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Send notification (via mach message) that data is available.

    if ( ( head == tail )                                                   /* queue was empty prior to enqueue() */
    ||   ( dataQueue->head == tail ) )   /* queue was emptied during enqueue() */
    {
        sendDataAvailableNotification();
    }

    return true;
}
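For orientation, a minimal sketch of the producer-side setup this method assumes, using the standard IODataQueue entry points; the capacity, payload type, and port are illustrative.

#include <IOKit/IODataQueue.h>

struct MyEvent { UInt32 kind; UInt32 value; };

static IODataQueue *gEventQueue;

static bool
setupAndPost(mach_port_t clientPort)
{
    gEventQueue = IODataQueue::withCapacity(4096);   // allocates the shared queue + header
    if (!gEventQueue) {
        return false;
    }
    gEventQueue->setNotificationPort(clientPort);    // where enqueue() sends its wakeup

    MyEvent ev = { 1, 42 };
    return gEventQueue->enqueue(&ev, sizeof(ev));    // false means the queue was full
}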
Example #16
void
kfree(
	void 		*data,
	vm_size_t	size)
{
	zone_t z;

	if (size < MAX_SIZE_ZDLUT)
		z = get_zone_dlut(size);
	else if (size < kalloc_max_prerounded)
		z = get_zone_search(size, k_zindex_start);
	else {
		/* if size was too large for a zone, then use kmem_free */

		vm_map_t alloc_map = kernel_map;

		if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max))
			alloc_map = kalloc_map;
		if (size > kalloc_largest_allocated) {
			/*
			 * Work around double FREEs of small MALLOCs.  This used to
			 * end up being a nop, because a pointer freed from an
			 * allocation backed by the zalloc world could never show up
			 * in the kalloc_map.  The kernel_map is a different story:
			 * once the allocation is released back into the zalloc pool,
			 * a pointer gets written over the 'size' that MALLOC kept in
			 * the first 4 bytes of the underlying allocation.  That
			 * pointer looks like a huge size on the second FREE and
			 * pushes the kfree into the kernel_map, where we end up
			 * removing a ton of virtual space before we panic.  This
			 * check makes us ignore a kfree whose size must be 'bogus'.
			 * Note that the size might be bogus for reasons other than
			 * the scenario above, but it would still be wrong and cause
			 * serious damage.
			 */
			OSAddAtomic(1, &kfree_nop_count);
			return;
		}
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_spin_lock();

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		kalloc_unlock();

		KALLOC_ZINFO_SFREE(size);
		return;
	}

	/* free to the appropriate zone */
#ifdef KALLOC_DEBUG
	if (size > z->elem_size)
		panic("%s: z %p (%s) but requested size %lu", __func__,
		    z, z->zone_name, (unsigned long)size);
#endif
	assert(size <= z->elem_size);
	zfree(z, data);
}
Example #17
static struct kern_coredump_core *
kern_register_coredump_helper_internal(int kern_coredump_config_vers, kern_coredump_callback_config *kc_callbacks,
				void *refcon, const char *core_description, boolean_t xnu_callback, boolean_t is64bit,
				uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
	struct kern_coredump_core *core_helper = NULL;
	kern_coredump_callback_config *core_callbacks = NULL;

	if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION)
		return NULL;
	if (kc_callbacks == NULL)
		return NULL;
	if (core_description == NULL)
		return NULL;

	if (kc_callbacks->kcc_coredump_get_summary == NULL ||
			kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
			kc_callbacks->kcc_coredump_save_segment_data == NULL ||
			kc_callbacks->kcc_coredump_save_thread_state == NULL ||
			kc_callbacks->kcc_coredump_save_sw_vers == NULL)
		return NULL;

#if !defined(__LP64__)
	/* We don't support generating 64-bit cores on 32-bit platforms */
	if (is64bit)
		return NULL;
#endif

	core_helper = kalloc(sizeof(*core_helper));
	core_helper->kcc_next = NULL;
	core_helper->kcc_refcon = refcon;
	if (xnu_callback) {
		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
	} else {
		/* Make sure there's room for the -coproc suffix (16 - NULL char - strlen(-coproc)) */
		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.8s-coproc", core_description);
	}
	core_helper->kcc_is64bit = is64bit;
	core_helper->kcc_mh_magic = mh_magic;
	core_helper->kcc_cpu_type = cpu_type;
	core_helper->kcc_cpu_subtype = cpu_subtype;
	core_callbacks = &core_helper->kcc_cb;

	core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
	core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
	core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
	core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
	core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
	core_callbacks->kcc_coredump_save_misc_data = kc_callbacks->kcc_coredump_save_misc_data;
	core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;

	if (xnu_callback) {
		assert(kernel_helper == NULL);
		kernel_helper = core_helper;
	} else {
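		/*
		 * Lock-free push onto the coprocessor helper list: stash the current
		 * head as the new node's next pointer, then compare-and-swap the list
		 * head to the new node, retrying if another registration raced in.
		 */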
		do {
			core_helper->kcc_next = kern_coredump_core_list;
		} while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
	}

	OSAddAtomic(1, &coredump_registered_count);
	kprintf("Registered coredump handler for %s\n", core_description);

	return core_helper;
}
Example #18
static errno_t
fuse_vfsop_unmount(mount_t mp, int mntflags, vfs_context_t context)
{
    int   err        = 0;
    int   flags      = 0;

    fuse_device_t          fdev;
    struct fuse_data      *data;
    struct fuse_dispatcher fdi;

    vnode_t fuse_rootvp = NULLVP;

    fuse_trace_printf_vfsop();

    if (mntflags & MNT_FORCE) {
        flags |= FORCECLOSE;
    }

    data = fuse_get_mpdata(mp);
    if (!data) {
        panic("fuse4x: no mount private data in vfs_unmount");
    }

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif

    fdev = data->fdev;

    if (fdata_dead_get(data)) {

        /*
         * If the file system daemon is dead, it's pointless to try to do
         * any unmount-time operations that go out to user space. Therefore,
         * we pretend that this is a force unmount. However, this isn't of much
         * use. That's because if any non-root vnode is in use, the vflush()
         * that the kernel does before calling our VFS_UNMOUNT will fail
         * if the original unmount wasn't forcible already. That earlier
         * vflush is called with SKIPROOT though, so it wouldn't bail out
         * on the root vnode being in use.
         *
         * If we want, we could set FORCECLOSE here so that a non-forced
         * unmount will be "upgraded" to a forced unmount if the root vnode
         * is busy (you are cd'd to the mount point, for example). It's not
         * quite pure to do that though.
         *
         *    flags |= FORCECLOSE;
         *    log("fuse4x: forcing unmount on a dead file system\n");
         */

    } else if (!(data->dataflags & FSESS_INITED)) {
        flags |= FORCECLOSE;
        log("fuse4x: forcing unmount on not-yet-alive file system\n");
        fdata_set_dead(data);
    }

    fuse_rootvp = data->rootvp;

    fuse_trace_printf("%s: Calling vflush(mp, fuse_rootvp, flags=0x%X);\n", __FUNCTION__, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    err = vflush(mp, fuse_rootvp, flags);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);
    if (err) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return err;
    }

    if (vnode_isinuse(fuse_rootvp, 1) && !(flags & FORCECLOSE)) {
#if M_FUSE4X_ENABLE_BIGLOCK
        fuse_biglock_unlock(data->biglock);
#endif
        return EBUSY;
    }

    if (fdata_dead_get(data)) {
        goto alreadydead;
    }

    fdisp_init(&fdi, 0 /* no data to send along */);
    fdisp_make(&fdi, FUSE_DESTROY, mp, FUSE_ROOT_ID, context);

    fuse_trace_printf("%s: Waiting for reply from FUSE_DESTROY.\n", __FUNCTION__);
    err = fdisp_wait_answ(&fdi);
    fuse_trace_printf("%s:   Reply received.\n", __FUNCTION__);
    if (!err) {
        fuse_ticket_drop(fdi.tick);
    }

    /*
     * Note that dounmount() signals a VQ_UNMOUNT VFS event.
     */

    fdata_set_dead(data);

alreadydead:

    fuse_trace_printf("%s: Calling vnode_rele(fuse_rootp);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    vnode_rele(fuse_rootvp); /* We got this reference in fuse_vfsop_mount(). */
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    data->rootvp = NULLVP;

    fuse_trace_printf("%s: Calling vflush(mp, NULLVP, FORCECLOSE);\n", __FUNCTION__);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif
    (void)vflush(mp, NULLVP, FORCECLOSE);
#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_lock(data->biglock);
#endif
    fuse_trace_printf("%s:   Done.\n", __FUNCTION__);

    fuse_lck_mtx_lock(fdev->mtx);

    vfs_setfsprivate(mp, NULL);
    data->dataflags &= ~FSESS_MOUNTED;
    OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);

#if M_FUSE4X_ENABLE_BIGLOCK
    fuse_biglock_unlock(data->biglock);
#endif

    if (!(data->dataflags & FSESS_OPENED)) {

        /* fdev->data was left for us to clean up */

        fuse_device_close_final(fdev);

        /* fdev->data is gone now */
    }

    fuse_lck_mtx_unlock(fdev->mtx);

    return 0;
}
Example #19
static errno_t
fuse_vfsop_mount(mount_t mp, __unused vnode_t devvp, user_addr_t udata,
                 vfs_context_t context)
{
    int err      = 0;
    int mntopts  = 0;
    bool mounted = false;

    uint32_t drandom  = 0;
    uint32_t max_read = ~0;

    size_t len;

    fuse_device_t      fdev = NULL;
    struct fuse_data  *data = NULL;
    fuse_mount_args    fusefs_args;
    struct vfsstatfs  *vfsstatfsp = vfs_statfs(mp);

    kern_return_t kr;
    thread_t      init_thread;

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_biglock_t    *biglock;
#endif

    fuse_trace_printf_vfsop();

    if (vfs_isupdate(mp)) {
        return ENOTSUP;
    }

    err = copyin(udata, &fusefs_args, sizeof(fusefs_args));
    if (err) {
        return EINVAL;
    }

    /*
     * Interesting flags that we can receive from mount or may want to
     * otherwise forcibly set include:
     *
     *     MNT_ASYNC
     *     MNT_AUTOMOUNTED
     *     MNT_DEFWRITE
     *     MNT_DONTBROWSE
     *     MNT_IGNORE_OWNERSHIP
     *     MNT_JOURNALED
     *     MNT_NODEV
     *     MNT_NOEXEC
     *     MNT_NOSUID
     *     MNT_NOUSERXATTR
     *     MNT_RDONLY
     *     MNT_SYNCHRONOUS
     *     MNT_UNION
     */

#if M_OSXFUSE_ENABLE_UNSUPPORTED
    vfs_setlocklocal(mp);
#endif /* M_OSXFUSE_ENABLE_UNSUPPORTED */

    /** Option Processing. **/

    if (fusefs_args.altflags & FUSE_MOPT_FSTYPENAME) {
        size_t typenamelen = strlen(fusefs_args.fstypename);
        if ((typenamelen <= 0) || (typenamelen > FUSE_FSTYPENAME_MAXLEN)) {
            return EINVAL;
        }
        snprintf(vfsstatfsp->f_fstypename, MFSTYPENAMELEN, "%s%s",
                 OSXFUSE_FSTYPENAME_PREFIX, fusefs_args.fstypename);
    }

    if ((fusefs_args.daemon_timeout > FUSE_MAX_DAEMON_TIMEOUT) ||
        (fusefs_args.daemon_timeout < FUSE_MIN_DAEMON_TIMEOUT)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SPARSE) {
        mntopts |= FSESS_SPARSE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_SLOW_STATFS) {
        mntopts |= FSESS_SLOW_STATFS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_CACHE) {
        mntopts |= FSESS_AUTO_CACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_AUTO_XATTR) {
        if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
            return EINVAL;
        }
        mntopts |= FSESS_AUTO_XATTR;
    } else if (fusefs_args.altflags & FUSE_MOPT_NATIVE_XATTR) {
        mntopts |= FSESS_NATIVE_XATTR;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_BROWSE) {
        vfs_setflags(mp, MNT_DONTBROWSE);
    }

    if (fusefs_args.altflags & FUSE_MOPT_JAIL_SYMLINKS) {
        mntopts |= FSESS_JAIL_SYMLINKS;
    }

    /*
     * Note that unlike Linux, which keeps allow_root in user-space and
     * passes allow_other in that case to the kernel, we let allow_root
     * reach the kernel. The 'if' ordering is important here.
     */
    if (fusefs_args.altflags & FUSE_MOPT_ALLOW_ROOT) {
        int is_member = 0;
        if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                     &is_member) == 0) && is_member) {
            mntopts |= FSESS_ALLOW_ROOT;
        } else {
            IOLog("OSXFUSE: caller not a member of OSXFUSE admin group (%d)\n",
                  fuse_admin_group);
            return EPERM;
        }
    } else if (fusefs_args.altflags & FUSE_MOPT_ALLOW_OTHER) {
        if (!fuse_allow_other && !fuse_vfs_context_issuser(context)) {
            int is_member = 0;
            if ((kauth_cred_ismember_gid(kauth_cred_get(), fuse_admin_group,
                                         &is_member) != 0) || !is_member) {
                return EPERM;
            }
        }
        mntopts |= FSESS_ALLOW_OTHER;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEDOUBLE) {
        mntopts |= FSESS_NO_APPLEDOUBLE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_APPLEXATTR) {
        mntopts |= FSESS_NO_APPLEXATTR;
    }

    if ((fusefs_args.altflags & FUSE_MOPT_FSID) && (fusefs_args.fsid != 0)) {
        fsid_t   fsid;
        mount_t  other_mp;
        uint32_t target_dev;

        target_dev = FUSE_MAKEDEV(FUSE_CUSTOM_FSID_DEVICE_MAJOR,
                                  fusefs_args.fsid);

        fsid.val[0] = target_dev;
        fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

        other_mp = vfs_getvfs(&fsid);
        if (other_mp != NULL) {
            return EPERM;
        }

        vfsstatfsp->f_fsid.val[0] = target_dev;
        vfsstatfsp->f_fsid.val[1] = FUSE_CUSTOM_FSID_VAL1;

    } else {
        vfs_getnewfsid(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_LOCALCACHES) {
        mntopts |= FSESS_NO_ATTRCACHE;
        mntopts |= FSESS_NO_READAHEAD;
        mntopts |= FSESS_NO_UBC;
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_ATTRCACHE) {
        mntopts |= FSESS_NO_ATTRCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_READAHEAD) {
        mntopts |= FSESS_NO_READAHEAD;
    }

    if (fusefs_args.altflags & (FUSE_MOPT_NO_UBC | FUSE_MOPT_DIRECT_IO)) {
        mntopts |= FSESS_NO_UBC;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_VNCACHE) {
        mntopts |= FSESS_NO_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NEGATIVE_VNCACHE) {
        if (mntopts & FSESS_NO_VNCACHE) {
            return EINVAL;
        }
        mntopts |= FSESS_NEGATIVE_VNCACHE;
    }

    if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCWRITES) {

        /* Cannot mix 'nosyncwrites' with 'noubc' or 'noreadahead'. */
        if (mntopts & (FSESS_NO_READAHEAD | FSESS_NO_UBC)) {
            return EINVAL;
        }

        mntopts |= FSESS_NO_SYNCWRITES;
        vfs_clearflags(mp, MNT_SYNCHRONOUS);
        vfs_setflags(mp, MNT_ASYNC);

        /* We check for this only if we have nosyncwrites in the first place. */
        if (fusefs_args.altflags & FUSE_MOPT_NO_SYNCONCLOSE) {
            mntopts |= FSESS_NO_SYNCONCLOSE;
        }

    } else {
        vfs_clearflags(mp, MNT_ASYNC);
        vfs_setflags(mp, MNT_SYNCHRONOUS);
    }

    if (mntopts & FSESS_NO_UBC) {
        /* If no buffer cache, disallow exec from file system. */
        vfs_setflags(mp, MNT_NOEXEC);
    }

    vfs_setauthopaque(mp);
    vfs_setauthopaqueaccess(mp);

    if ((fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) &&
        (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS)) {
        return EINVAL;
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFAULT_PERMISSIONS) {
        mntopts |= FSESS_DEFAULT_PERMISSIONS;
        vfs_clearauthopaque(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_DEFER_PERMISSIONS) {
        mntopts |= FSESS_DEFER_PERMISSIONS;
    }

    if (fusefs_args.altflags & FUSE_MOPT_EXTENDED_SECURITY) {
        mntopts |= FSESS_EXTENDED_SECURITY;
        vfs_setextendedsecurity(mp);
    }

    if (fusefs_args.altflags & FUSE_MOPT_LOCALVOL) {
        mntopts |= FSESS_LOCALVOL;
        vfs_setflags(mp, MNT_LOCAL);
    }

    /* done checking incoming option bits */

    err = 0;

    vfs_setfsprivate(mp, NULL);

    fdev = fuse_device_get(fusefs_args.rdev);
    if (!fdev) {
        return EINVAL;
    }

    fuse_device_lock(fdev);

    drandom = fuse_device_get_random(fdev);
    if (fusefs_args.random != drandom) {
        fuse_device_unlock(fdev);
        IOLog("OSXFUSE: failing mount because of mismatched random\n");
        return EINVAL;
    }

    data = fuse_device_get_mpdata(fdev);

    if (!data) {
        fuse_device_unlock(fdev);
        return ENXIO;
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    biglock = data->biglock;
    fuse_biglock_lock(biglock);
#endif

    if (data->mount_state != FM_NOTMOUNTED) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_unlock(biglock);
#endif
        fuse_device_unlock(fdev);
        return EALREADY;
    }

    if (!(data->dataflags & FSESS_OPENED)) {
        fuse_device_unlock(fdev);
        err = ENXIO;
        goto out;
    }

    data->mount_state = FM_MOUNTED;
    OSAddAtomic(1, (SInt32 *)&fuse_mount_count);
    mounted = true;

    if (fdata_dead_get(data)) {
        fuse_device_unlock(fdev);
        err = ENOTCONN;
        goto out;
    }

    if (!data->daemoncred) {
        panic("OSXFUSE: daemon found but identity unknown");
    }

    if (fuse_vfs_context_issuser(context) &&
        kauth_cred_getuid(vfs_context_ucred(context)) != kauth_cred_getuid(data->daemoncred)) {
        fuse_device_unlock(fdev);
        err = EPERM;
        goto out;
    }

    data->mp = mp;
    data->fdev = fdev;
    data->dataflags |= mntopts;

    data->daemon_timeout.tv_sec =  fusefs_args.daemon_timeout;
    data->daemon_timeout.tv_nsec = 0;
    if (data->daemon_timeout.tv_sec) {
        data->daemon_timeout_p = &(data->daemon_timeout);
    } else {
        data->daemon_timeout_p = (struct timespec *)0;
    }

    data->max_read = max_read;
    data->fssubtype = fusefs_args.fssubtype;
    data->mountaltflags = fusefs_args.altflags;
    data->noimplflags = (uint64_t)0;

    data->blocksize = fuse_round_size(fusefs_args.blocksize,
                                      FUSE_MIN_BLOCKSIZE, FUSE_MAX_BLOCKSIZE);

    data->iosize = fuse_round_size(fusefs_args.iosize,
                                   FUSE_MIN_IOSIZE, FUSE_MAX_IOSIZE);

    if (data->iosize < data->blocksize) {
        data->iosize = data->blocksize;
    }

    data->userkernel_bufsize = FUSE_DEFAULT_USERKERNEL_BUFSIZE;

    copystr(fusefs_args.fsname, vfsstatfsp->f_mntfromname,
            MNAMELEN - 1, &len);
    bzero(vfsstatfsp->f_mntfromname + len, MNAMELEN - len);

    copystr(fusefs_args.volname, data->volname, MAXPATHLEN - 1, &len);
    bzero(data->volname + len, MAXPATHLEN - len);

    /* previous location of vfs_setioattr() */

    vfs_setfsprivate(mp, data);

    fuse_device_unlock(fdev);

    /* Send a handshake message to the daemon. */
    kr = kernel_thread_start(fuse_internal_init, data, &init_thread);
    if (kr != KERN_SUCCESS) {
        IOLog("OSXFUSE: could not start init thread\n");
        err = ENOTCONN;
    } else {
        thread_deallocate(init_thread);
    }

out:
    if (err) {
        vfs_setfsprivate(mp, NULL);

        fuse_device_lock(fdev);
        data = fuse_device_get_mpdata(fdev); /* again */
        if (mounted) {
            OSAddAtomic(-1, (SInt32 *)&fuse_mount_count);
        }
        if (data) {
            data->mount_state = FM_NOTMOUNTED;
            if (!(data->dataflags & FSESS_OPENED)) {
#if M_OSXFUSE_ENABLE_BIG_LOCK
                assert(biglock == data->biglock);
                fuse_biglock_unlock(biglock);
#endif
                fuse_device_close_final(fdev);
                /* data is gone now */
            }
        }
        fuse_device_unlock(fdev);
    } else {
        vnode_t fuse_rootvp = NULLVP;
        err = fuse_vfsop_root(mp, &fuse_rootvp, context);
        if (err) {
            goto out; /* go back and follow error path */
        }
        err = vnode_ref(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        /*
         * Even though fuse_rootvp will not be reclaimed by vnode_put (we
         * incremented its usecount with vnode_ref), release biglock just
         * to be safe.
         */
        fuse_biglock_unlock(biglock);
#endif /* M_OSXFUSE_ENABLE_BIG_LOCK */
        (void)vnode_put(fuse_rootvp);
#if M_OSXFUSE_ENABLE_BIG_LOCK
        fuse_biglock_lock(biglock);
#endif
        if (err) {
            goto out; /* go back and follow error path */
        } else {
            struct vfsioattr ioattr;

            vfs_ioattr(mp, &ioattr);
            ioattr.io_maxreadcnt = ioattr.io_maxwritecnt = data->iosize;
            ioattr.io_segreadcnt = ioattr.io_segwritecnt = data->iosize / PAGE_SIZE;
            ioattr.io_maxsegreadsize = ioattr.io_maxsegwritesize = data->iosize;
            ioattr.io_devblocksize = data->blocksize;
            vfs_setioattr(mp, &ioattr);
        }
    }

#if M_OSXFUSE_ENABLE_BIG_LOCK
    fuse_device_lock(fdev);
    data = fuse_device_get_mpdata(fdev); /* ...and again */
    if(data) {
        assert(data->biglock == biglock);
        fuse_biglock_unlock(biglock);
    }
    fuse_device_unlock(fdev);
#endif

    return err;
}