Example #1
File: net.c Project: ctos/bpi
/* Free all sk_buffs on the done list.
   This routine is called by the iodone thread in ds_routines.c.  */
void
free_skbuffs ()
{
  struct sk_buff *skb;

  while (1)
    {
      skb = skb_dequeue (&skb_done_list);
      if (skb)
	{
	  if (skb->copy)
	    {
	      vm_map_copy_discard (skb->copy);
	      skb->copy = NULL;
	    }
	  if (IP_VALID (skb->reply))
	    {
	      ds_device_write_reply (skb->reply, skb->reply_type, 0, skb->len);
	      skb->reply = IP_NULL;
	    }
	  dev_kfree_skb (skb, FREE_WRITE);
	}
      else
	break;
    }
}
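
A hedged sketch of the producer side may help: the done list is only drained here, so a transmit-completion path elsewhere must enqueue the sk_buff and wake the iodone thread. skb_queue_tail and thread_wakeup are real primitives in this Linux-glue environment, but the specific wakeup event (io_done_list) is an assumption, not taken from net.c.

/* Hypothetical producer for skb_done_list (sketch only; the wakeup
   event is assumed).  free_skbuffs above is the matching consumer.  */
static void
queue_done_skb (struct sk_buff *skb)
{
  skb_queue_tail (&skb_done_list, skb);		/* hand off to the reaper */
  thread_wakeup ((event_t) &io_done_list);	/* assumed iodone event */
}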
Example #2
/*
 * fileport_walk
 *
 * Description: Invoke the action function on every fileport in the task.
 *
 *		This could be more efficient if we refactored mach_port_names()
 *		so that (a) it didn't compute the type information unless asked
 *		and (b) it could be asked to -not- unwire/copyout the memory
 *		and (c) if we could ask for port names by kobject type. Not
 *		clear that it's worth all that complexity, though.
 *
 * Parameters: 	task		The target task
 *		action		The function to invoke on each fileport
 *		arg		Anonymous pointer to caller state.
 */
kern_return_t
fileport_walk(task_t task,
	int (*action)(mach_port_name_t, struct fileglob *, void *arg),
	void *arg)
{
	mach_port_name_t *names;
	mach_msg_type_number_t ncnt, tcnt;
	vm_map_copy_t map_copy_names, map_copy_types;
	vm_map_address_t map_names;
	kern_return_t kr;
	uint_t i;
	int rval;

	/*
	 * mach_port_names returns the 'name' and 'types' in copied-in
	 * form.  Discard 'types' immediately, then copyout 'names'
	 * back into the kernel before walking the array.
	 */

	kr = mach_port_names(task->itk_space,
	    (mach_port_name_t **)&map_copy_names, &ncnt,
	    (mach_port_type_t **)&map_copy_types, &tcnt);
	if (kr != KERN_SUCCESS)
		return (kr);

	vm_map_copy_discard(map_copy_types);

	kr = vm_map_copyout(ipc_kernel_map, &map_names, map_copy_names);
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(map_copy_names);
		return (kr);
	}
	names = (mach_port_name_t *)(uintptr_t)map_names;

	for (rval = 0, i = 0; i < ncnt; i++)
		if (fileport_invoke(task, names[i], action, arg,
		    &rval) == KERN_SUCCESS && -1 == rval)
			break;		/* early termination clause */

	vm_deallocate(ipc_kernel_map,
	    (vm_address_t)names, ncnt * sizeof (*names));
	return (KERN_SUCCESS);
}
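
The action callback's contract is implicit in the loop above: fileport_invoke stores the callback's return value in *rval, and the walk stops as soon as it sees -1. A minimal, hypothetical callback that counts a task's fileports (fileport_count is illustrative, not part of the source):

static int
fileport_count(__unused mach_port_name_t name,
	__unused struct fileglob *fg, void *arg)
{
	(*(int *)arg)++;
	return (0);	/* 0 continues the walk; -1 would stop it early */
}

/* usage: int n = 0; (void) fileport_walk(task, fileport_count, &n); */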
Example #3
kern_return_t
map_fd_funneled(
	int			fd,
	vm_object_offset_t	offset,
	vm_offset_t		*va,
	boolean_t		findspace,
	vm_size_t		size)
{
	kern_return_t	result;
	struct fileproc	*fp;
	struct vnode	*vp;
	void *	pager;
	vm_offset_t	map_addr=0;
	vm_size_t	map_size;
	int		err=0;
	vm_map_t	my_map;
	proc_t		p = current_proc();
	struct vnode_attr vattr;

	/*
	 *	Find the inode; verify that it's a regular file.
	 */

	err = fp_lookup(p, fd, &fp, 0);
	if (err)
		return(err);
	
	if (fp->f_fglob->fg_type != DTYPE_VNODE){
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	if (!(fp->f_fglob->fg_flag & FREAD)) {
		err = KERN_PROTECTION_FAILURE;
		goto bad;
	}

	vp = (struct vnode *)fp->f_fglob->fg_data;
	err = vnode_getwithref(vp);
	if(err != 0) 
		goto bad;

	if (vp->v_type != VREG) {
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}

	AUDIT_ARG(vnpath, vp, ARG_VNODE1);

	/*
	 * POSIX: mmap needs to update access time for mapped files
	 */
	if ((vnode_vfsvisflags(vp) & MNT_NOATIME) == 0) {
		VATTR_INIT(&vattr);
		nanotime(&vattr.va_access_time);
		VATTR_SET_ACTIVE(&vattr, va_access_time);
		vnode_setattr(vp, &vattr, vfs_context_current());
	}
	
	if (offset & PAGE_MASK_64) {
		printf("map_fd: file offset not page aligned(%d : %s)\n",p->p_pid, p->p_comm);
		(void)vnode_put(vp);
		err = KERN_INVALID_ARGUMENT;
		goto bad;
	}
	map_size = round_page(size);

	/*
	 * Allow user to map in a zero length file.
	 */
	if (size == 0) {
		(void)vnode_put(vp);
		err = KERN_SUCCESS;
		goto bad;
	}
	/*
	 *	Map in the file.
	 */
	pager = (void *)ubc_getpager(vp);
	if (pager == NULL) {
		(void)vnode_put(vp);
		err = KERN_FAILURE;
		goto bad;
	}


	my_map = current_map();

	result = vm_map_64(
			my_map,
			&map_addr, map_size, (vm_offset_t)0, 
			VM_FLAGS_ANYWHERE, pager, offset, TRUE,
			VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (result != KERN_SUCCESS) {
		(void)vnode_put(vp);
		err = result;
		goto bad;
	}


	if (!findspace) {
		vm_offset_t	dst_addr;
		vm_map_copy_t	tmp;

		if (copyin(CAST_USER_ADDR_T(va), &dst_addr, sizeof (dst_addr))	||
					trunc_page_32(dst_addr) != dst_addr) {
			(void) vm_map_remove(
					my_map,
					map_addr, map_addr + map_size,
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}

		result = vm_map_copyin(my_map, (vm_map_address_t)map_addr,
				       (vm_map_size_t)map_size, TRUE, &tmp);
		if (result != KERN_SUCCESS) {
			
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}

		result = vm_map_copy_overwrite(my_map,
					(vm_map_address_t)dst_addr, tmp, FALSE);
		if (result != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void)vnode_put(vp);
			err = result;
			goto bad;
		}
	} else {
		if (copyout(&map_addr, CAST_USER_ADDR_T(va), sizeof (map_addr))) {
			(void) vm_map_remove(my_map, vm_map_trunc_page(map_addr),
					vm_map_round_page(map_addr + map_size),
					VM_MAP_NO_FLAGS);
			(void)vnode_put(vp);
			err = KERN_INVALID_ADDRESS;
			goto bad;
		}
	}

	ubc_setthreadcred(vp, current_proc(), current_thread());
	(void)ubc_map(vp, (PROT_READ | PROT_EXEC));
	(void)vnode_put(vp);
	err = 0;
bad:
	fp_drop(p, fd, fp, 0);
	return (err);
}
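
For orientation, a hedged user-space sketch of the call this funneled routine once backed: map_fd() is the long-deprecated predecessor of mmap(), and the parameter list below simply mirrors the kernel entry above (fd, offset, va, findspace, size). Treat the exact user-level declaration as an assumption.

#include <fcntl.h>
#include <unistd.h>
#include <mach/mach.h>

/* Sketch only: map 'size' bytes of a file wherever the kernel finds
 * space.  With findspace == TRUE the chosen address is copied out
 * into *va; the offset must be page aligned, per the check above. */
kern_return_t
map_readonly(const char *path, vm_offset_t *va, vm_size_t size)
{
	int		fd = open(path, O_RDONLY);
	kern_return_t	kr;

	if (fd < 0)
		return (KERN_FAILURE);
	kr = map_fd(fd, 0, va, TRUE, size);
	close(fd);
	return (kr);
}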
Example #4
/*
 *	Loads a symbol table for an external file into the kernel debugger.
 *	The symbol table data is an array of characters.  It is assumed that
 *	the caller and the kernel debugger agree on its format.
 */
kern_return_t
host_load_symbol_table(
	host_t		host,
	task_t		task,
	char *		name,
	pointer_t	symtab,
	unsigned int	symtab_count)
{
	kern_return_t	result;
	vm_offset_t	symtab_start;
	vm_offset_t	symtab_end;
	vm_map_t	map;
	vm_map_copy_t	symtab_copy_object;

	if (host == HOST_NULL)
	    return (KERN_INVALID_ARGUMENT);

	/*
	 * Copy the symbol table array into the kernel.
	 * We make a copy of the copy object, and clear
	 * the old one, so that returning error will not
	 * deallocate the data twice.
	 */
	symtab_copy_object = (vm_map_copy_t) symtab;
	result = vm_map_copyout(
			kernel_map,
			&symtab_start,
			vm_map_copy_copy(symtab_copy_object));
	if (result != KERN_SUCCESS)
	    return (result);

	symtab_end = symtab_start + symtab_count;

	/*
	 * Add the symbol table.
	 * Do not keep a reference for the task map.	XXX
	 */
	if (task == TASK_NULL)
	    map = VM_MAP_NULL;
	else
	    map = task->map;
	if (!X_db_sym_init((char *)symtab_start,
			(char *)symtab_end,
			name,
			(char *)map))
	{
	    /*
	     * Not enough room for symbol table - failure.
	     */
	    (void) vm_deallocate(kernel_map,
			symtab_start,
			symtab_count);
	    return (KERN_FAILURE);
	}

	/*
	 * Wire down the symbol table
	 */
	(void) vm_map_pageable(kernel_map,
		symtab_start,
		round_page(symtab_end),
		VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * Discard the original copy object
	 */
	vm_map_copy_discard(symtab_copy_object);

	return (KERN_SUCCESS);
}
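
The guard described in the comment is worth isolating. vm_map_copy_copy() moves the contents of a copy object into a fresh one, leaving the original an empty shell; vm_map_copyout() then consumes the fresh object on success, so the hollowed original can be discarded exactly once, whether here or by MIG's error handling. A sketch of the pattern (copyout_once is illustrative):

static kern_return_t
copyout_once(vm_map_copy_t shell, vm_offset_t *addr)
{
	vm_map_copy_t	clone = vm_map_copy_copy(shell);
	kern_return_t	kr;

	kr = vm_map_copyout(kernel_map, addr, clone);
	if (kr != KERN_SUCCESS) {
	    vm_map_copy_discard(clone);	/* copyout did not consume it */
	    return (kr);		/* MIG discards the empty shell */
	}
	vm_map_copy_discard(shell);	/* success: drop the hollowed shell */
	return (KERN_SUCCESS);
}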
Example #5
/*
 * Push data from a copy-call strategy object into its copy.
 */
void
svm_copy_push_page(
	xmm_obj_t	mobj,
	vm_offset_t	offset,
	vm_map_copy_t	data)
{
	unsigned int page = atop(offset);
	xmm_obj_t copy;
	kern_return_t kr;
	char *m_bits;

#if	COPY_CALL_PUSH_PAGE_TO_KERNEL
	xmm_obj_t kobj;
	char *k_bits;
#endif	/* COPY_CALL_PUSH_PAGE_TO_KERNEL */

	/*
	 * We have a real page that must be copied into the copy object
	 * before the kernel can be permitted to modify it.
	 * We will need to return the page to the copy object's pager
	 * and mark the page as no longer needing to be copied.
	 *
	 * Make sure we have a copy mobj. If the current copy mobj is frozen,
	 * create and use a new one. In either case, mark the copy mobj dirty.
	 */
	xmm_obj_lock(mobj);
	for (;;) {
		copy = svm_get_stable_copy(mobj);

		/*
		 * A MP race condition may occur between the copy detection
		 * made in svm_satisfy_kernel_request() and now. If the copy
		 * has been destroyed, then release data and return.
		 */
		if (copy == XMM_OBJ_NULL) {
			M_MAP_GET(MOBJ, page, m_bits);
			assert(!M_GET_NEEDS_COPY(m_bits));
			xmm_obj_unlock(mobj);
			vm_map_copy_discard(data);
			return;
		}

		xmm_obj_lock(copy);
		if (COPY->state == MOBJ_STATE_TERMINATED) {
			xmm_obj_unlock(copy);
			xmm_obj_unlock(mobj);
			vm_map_copy_discard(data);
			return;
		}
		
#if	MACH_PAGEMAP
		if (COPY->pagemap != VM_EXTERNAL_NULL) {
			xmm_obj_unlock(copy);
			svm_create_new_copy(mobj);
			continue;
		}
#endif	/* MACH_PAGEMAP */
		break;
	}
	M_MAP_GET(MOBJ, page, m_bits);
	M_CLR_NEEDS_COPY(m_bits);
	xmm_obj_unlock(mobj);

	COPY->dirty_copy = TRUE;
	COPY->k_count++;

	/*
	 * Initialize copy if we need to.
	 */
	if (COPY->state == MOBJ_STATE_UNCALLED ||
	    COPY->state == MOBJ_STATE_CALLED) {
		svm_mobj_initialize(copy, TRUE, 0);
	}
	assert(COPY->state == MOBJ_STATE_READY ||
	       COPY->state == MOBJ_STATE_SHOULD_TERMINATE);

#if	COPY_CALL_PUSH_PAGE_TO_KERNEL
	/*
	 * If there is a local kobj associated to the copy object to which
	 * the page must be returned, then the page is supplied to the local
	 * kernel and marked precious.
	 */
	for (kobj = COPY->kobj_list; kobj; kobj = KOBJ->next)
		if (KOBJ->local)
			break;
	if (kobj != XMM_OBJ_NULL && svm_copy_call_push_page_to_kernel) {
		/* consumes locks */
		svm_copy_supply(kobj, offset, data);
	} else
#endif	/* COPY_CALL_PUSH_PAGE_TO_KERNEL */

	/*
	 * Write page into pager backing copy object. The page is dirty, and
	 * the copy object itself has not kept a copy of the data. Mark the
	 * page as no longer needing to be copied.
	 */
	{
		xmm_obj_unlock(copy);
		svm_prepare_copy(data);

		kr = M_DATA_RETURN(copy, offset, data,
				   PAGE_SIZE, TRUE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	xmm_obj_lock(copy);
	if (--COPY->k_count == 0 &&
	    COPY->state == MOBJ_STATE_SHOULD_TERMINATE) {
		xmm_terminate_pending--;
		xmm_svm_destroy(copy); /* consumes lock */
	} else
		xmm_obj_unlock(copy);

	/*
	 * We don't have to flush page from any kernel.
	 * If we are ready to grant write permission
	 * to this kernel, then no other kernel can
	 * have this page mapped. Thus we only have to
	 * worry about the requesting kernel having
	 * this page mapped for the copy. However,
	 * vm_fault_page will handle this case for us:
	 * granting write permission to a page in
	 * an object with a copy_call strategy will
	 * cause the page to be unmapped from all
	 * address spaces before write permission
	 * is enabled.
	 */
}
Example #6
/*********************************************************************
* IMPORTANT: Once we have done the vm_map_copyout(), we *must* return
* KERN_SUCCESS or the kernel map gets messed up (reason as yet
* unknown). We use op_result to return the real result of our work.
*********************************************************************/
kern_return_t kext_request(
    host_priv_t                             hostPriv,
    /* in only */  uint32_t                 clientLogSpec,
    /* in only */  vm_offset_t              requestIn,
    /* in only */  mach_msg_type_number_t   requestLengthIn,
    /* out only */ vm_offset_t            * responseOut,
    /* out only */ mach_msg_type_number_t * responseLengthOut,
    /* out only */ vm_offset_t            * logDataOut,
    /* out only */ mach_msg_type_number_t * logDataLengthOut,
    /* out only */ kern_return_t          * op_result)
{
    kern_return_t     result          = KERN_FAILURE;
    vm_map_address_t  map_addr        = 0;     // do not free/deallocate
    char            * request         = NULL;  // must vm_deallocate

    mkext2_header   * mkextHeader     = NULL;  // do not release
    bool              isMkext         = false;

    char            * response        = NULL;  // must kmem_free
    uint32_t          responseLength  = 0;
    char            * logData         = NULL;  // must kmem_free
    uint32_t          logDataLength   = 0;

   /* MIG doesn't pass "out" parameters as empty, so clear them immediately
    * just in case, or MIG will try to copy out bogus data.
    */    
    *op_result = KERN_FAILURE;
    *responseOut = NULL;
    *responseLengthOut = 0;
    *logDataOut = NULL;
    *logDataLengthOut = 0;

   /* Check for input. Don't discard what isn't there, though.
    */
    if (!requestLengthIn || !requestIn) {
		OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogIPCFlag,
            "Invalid request from user space (no data).");
        *op_result = KERN_INVALID_ARGUMENT;
        goto finish;
    }

   /* Once we have done the vm_map_copyout(), we *must* return KERN_SUCCESS
    * or the kernel map gets messed up (reason as yet unknown). We will use
    * op_result to return the real result of our work.
    */
    result = vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)requestIn);
    if (result != KERN_SUCCESS) {
        OSKextLog(/* kext */ NULL,
            kOSKextLogErrorLevel |
            kOSKextLogIPCFlag,
            "vm_map_copyout() failed for request from user space.");
        vm_map_copy_discard((vm_map_copy_t)requestIn);
        goto finish;
    }
    request = CAST_DOWN(char *, map_addr);

   /* Check if request is an mkext; this is always a load request
    * and requires root access. If it isn't an mkext, see if it's
    * an XML request, and check the request to see if that requires
    * root access.
    */
    if (requestLengthIn > sizeof(mkext2_header)) {
        mkextHeader = (mkext2_header *)request;
        if (MKEXT_GET_MAGIC(mkextHeader) == MKEXT_MAGIC &&
            MKEXT_GET_SIGNATURE(mkextHeader) == MKEXT_SIGN) {

            isMkext = true;
        }
    }

    if (isMkext) {
#ifdef SECURE_KERNEL
        // xxx - something tells me if we have a secure kernel we don't even
        // xxx - want to log a message here. :-)
        *op_result = KERN_NOT_SUPPORTED;
        goto finish;
#else
        // xxx - can we find out if calling task is kextd?
        // xxx - can we find the name of the calling task?
        if (hostPriv == HOST_PRIV_NULL) {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogLoadFlag | kOSKextLogIPCFlag,
                "Attempt by non-root process to load a kext.");
            *op_result = kOSKextReturnNotPrivileged;
            goto finish;
        }

        *op_result = OSKext::loadFromMkext((OSKextLogSpec)clientLogSpec,
            request, requestLengthIn,
            &logData, &logDataLength);

#endif /* defined(SECURE_KERNEL) */

    } else {

       /* If the request isn't an mkext, then it should be XML. Parse it
        * if possible and hand the request over to OSKext.
        */
        *op_result = OSKext::handleRequest(hostPriv,
            (OSKextLogSpec)clientLogSpec,
            request, requestLengthIn,
            &response, &responseLength,
            &logData, &logDataLength);
    }

    if (response && responseLength > 0) {
        kern_return_t copyin_result;

        copyin_result = vm_map_copyin(kernel_map,
            CAST_USER_ADDR_T(response), responseLength,
            /* src_destroy */ false, (vm_map_copy_t *)responseOut);
        if (copyin_result == KERN_SUCCESS) {
            *responseLengthOut = responseLength;
        } else {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogIPCFlag,
                "Failed to copy response to request from user space.");
            *op_result = copyin_result;  // xxx - should we map to our own code?
            *responseOut = NULL;
            *responseLengthOut = 0;
            goto finish;
        }
    }

    if (logData && logDataLength > 0) {
        kern_return_t copyin_result;

        copyin_result = vm_map_copyin(kernel_map,
            CAST_USER_ADDR_T(logData), logDataLength,
            /* src_destroy */ false, (vm_map_copy_t *)logDataOut);
        if (copyin_result == KERN_SUCCESS) {
            *logDataLengthOut = logDataLength;
        } else {
            OSKextLog(/* kext */ NULL,
                kOSKextLogErrorLevel |
                kOSKextLogIPCFlag,
                "Failed to copy log data for request from user space.");
            *op_result = copyin_result;  // xxx - should we map to our own code?
            *logDataOut = NULL;
            *logDataLengthOut = 0;
            goto finish;
        }
    }

finish:
    if (request) {
        (void)vm_deallocate(kernel_map, (vm_offset_t)request, requestLengthIn);
    }
    if (response) {
        kmem_free(kernel_map, (vm_offset_t)response, responseLength);
    }
    if (logData) {
        kmem_free(kernel_map, (vm_offset_t)logData, logDataLength);
    }

    return result;
}
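
The contract the banner comment insists on reduces to a few lines. A sketch, not XNU's code (consume_ool is hypothetical; the VM calls are real): a MIG server routine receiving out-of-line data either copies it out and later vm_deallocate()s the mapping, or discards the untouched copy object — never both, never neither — and once vm_map_copyout() succeeds the routine itself must return KERN_SUCCESS, reporting failure through an out parameter instead.

static kern_return_t
consume_ool(vm_offset_t dataIn, mach_msg_type_number_t lengthIn)
{
    vm_map_address_t  map_addr = 0;
    kern_return_t     kr;

    kr = vm_map_copyout(kernel_map, &map_addr, (vm_map_copy_t)dataIn);
    if (kr != KERN_SUCCESS) {
        vm_map_copy_discard((vm_map_copy_t)dataIn);  /* still ours to free */
        return kr;                                   /* nothing was mapped */
    }
    /* ... use the data at map_addr ... */
    (void)vm_deallocate(kernel_map, (vm_offset_t)map_addr, lengthIn);
    return KERN_SUCCESS;   /* real status travels in an out parameter */
}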
Example #7
static
load_return_t
load_dylinker(
    struct dylinker_command	*lcp,
    integer_t		archbits,
    vm_map_t		map,
    thread_t	thread,
    int			depth,
    load_result_t		*result,
    boolean_t		is_64bit
)
{
    char			*name;
    char			*p;
    struct vnode		*vp = NULLVP;	/* set by get_macho_vnode() */
    struct mach_header	header;
    off_t			file_offset = 0; /* set by get_macho_vnode() */
    off_t			macho_size = 0;	/* set by get_macho_vnode() */
    vm_map_t		copy_map;
    load_result_t		myresult;
    kern_return_t		ret;
    vm_map_copy_t	tmp;
    mach_vm_offset_t	dyl_start, map_addr;
    mach_vm_size_t		dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     *	Check for a proper null terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = load_result_null;

    /*
     *	First try to map dyld in directly.  This should work most of
     *	the time since there shouldn't normally be something already
     *	mapped to its address.
     */

    ret = parse_machfile(vp, map, thread, &header, file_offset, macho_size,
                         depth, &myresult);

    /*
     *	If it turned out something was in the way, then we'll take
     *	this longer path to map dyld into a temporary map and
     *	copy it into the destination map at a different address.
     */

    if (ret == LOAD_NOSPACE) {

        /*
         *	Load the Mach-O.
         *	Use a temporary map to do the work.
         */
        copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
                                             is_64bit),
                                 get_map_min(map), get_map_max(map), TRUE);
        if (VM_MAP_NULL == copy_map) {
            ret = LOAD_RESOURCE;
            goto out;
        }

        myresult = load_result_null;

        ret = parse_machfile(vp, copy_map, thread, &header,
                             file_offset, macho_size,
                             depth, &myresult);

        if (ret) {
            vm_map_deallocate(copy_map);
            goto out;
        }

        if (get_map_nentries(copy_map) > 0) {

            dyl_start = mach_get_vm_start(copy_map);
            dyl_length = mach_get_vm_end(copy_map) - dyl_start;

            map_addr = dyl_start;
            ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);

            if (ret != KERN_SUCCESS) {
                vm_map_deallocate(copy_map);
                ret = LOAD_NOSPACE;
                goto out;

            }

            ret = vm_map_copyin(copy_map,
                                (vm_map_address_t)dyl_start,
                                (vm_map_size_t)dyl_length,
                                TRUE, &tmp);
            if (ret != KERN_SUCCESS) {
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            ret = vm_map_copy_overwrite(map,
                                        (vm_map_address_t)map_addr,
                                        tmp, FALSE);
            if (ret != KERN_SUCCESS) {
                vm_map_copy_discard(tmp);
                (void) vm_map_remove(map,
                                     vm_map_trunc_page(map_addr),
                                     vm_map_round_page(map_addr + dyl_length),
                                     VM_MAP_NO_FLAGS);
                vm_map_deallocate(copy_map);
                goto out;
            }

            if (map_addr != dyl_start)
                myresult.entry_point += (map_addr - dyl_start);
        } else {
            ret = LOAD_FAILURE;
        }

        vm_map_deallocate(copy_map);
    }

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        (void)ubc_map(vp, PROT_READ | PROT_EXEC);
    }
out:
    vnode_put(vp);
    return (ret);

}
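
The LOAD_NOSPACE branch reuses the same relocation idiom as map_fd_funneled above; isolated, it looks like the fragment below (a sketch; map, src, dst and size are assumed). vm_map_copyin() with src_destroy == TRUE lifts the pages out of the source range, vm_map_copy_overwrite() deposits them at the destination, and a failed overwrite leaves the caller holding a copy object that must be discarded explicitly.

vm_map_copy_t	tmp;
kern_return_t	kr;

kr = vm_map_copyin(map, src, size, TRUE /* src_destroy */, &tmp);
if (kr == KERN_SUCCESS) {
    kr = vm_map_copy_overwrite(map, dst, tmp, FALSE /* interruptible */);
    if (kr != KERN_SUCCESS)
        vm_map_copy_discard(tmp);	/* overwrite failed: free the copy */
}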
Example #8
File: net.c Project: ctos/bpi
static io_return_t
device_write (void *d, ipc_port_t reply_port,
	      mach_msg_type_name_t reply_port_type, dev_mode_t mode,
	      recnum_t bn, io_buf_ptr_t data, unsigned int count,
	      int *bytes_written)
{
  unsigned char *p;
  int i, amt, skblen, s;
  io_return_t err = 0;
  vm_map_copy_t copy = (vm_map_copy_t) data;
  struct net_data *nd = d;
  struct linux_device *dev = nd->dev;
  struct sk_buff *skb;

  if (count == 0 || count > dev->mtu + dev->hard_header_len)
    return D_INVALID_SIZE;

  /* Allocate a sk_buff.  */
  amt = PAGE_SIZE - (copy->offset & PAGE_MASK);
  skblen = (amt >= count) ? 0 : count;
  skb = dev_alloc_skb (skblen);
  if (!skb)
    return D_NO_MEMORY;

  /* Copy user data.  This is only required if it spans multiple pages.  */
  if (skblen == 0)
    {
      assert (copy->cpy_npages == 1);

      skb->copy = copy;
      skb->data = ((void *) copy->cpy_page_list[0]->phys_addr
		   + (copy->offset & PAGE_MASK));
      skb->len = count;
      skb->head = skb->data;
      skb->tail = skb->data + skb->len;
      skb->end = skb->tail;
    }
  else
    {
      skb->len = skblen;
      skb->tail = skb->data + skblen;
      skb->end = skb->tail;
      
      memcpy (skb->data,
	      ((void *) copy->cpy_page_list[0]->phys_addr
	       + (copy->offset & PAGE_MASK)),
	      amt);
      count -= amt;
      p = skb->data + amt;
      for (i = 1; count > 0 && i < copy->cpy_npages; i++)
	{
	  amt = PAGE_SIZE;
	  if (amt > count)
	    amt = count;
	  memcpy (p, (void *) copy->cpy_page_list[i]->phys_addr, amt);
	  count -= amt;
	  p += amt;
	}

      assert (count == 0);

      vm_map_copy_discard (copy);
    }

  skb->dev = dev;
  skb->reply = reply_port;
  skb->reply_type = reply_port_type;

  /* Queue packet for transmission and schedule a software interrupt.  */
  s = splimp ();
  if (dev->buffs[0].next != (struct sk_buff *) &dev->buffs[0]
      || (*dev->hard_start_xmit) (skb, dev))
    {
      __skb_queue_tail (&dev->buffs[0], skb);
      mark_bh (NET_BH);
    }
  splx (s);

  /* Send packet to filters.  */
  {
    struct packet_header *packet;
    struct ether_header *header;
    ipc_kmsg_t kmsg;

    kmsg = net_kmsg_get ();

    if (kmsg != IKM_NULL)
      {
        /* Suitable for Ethernet only.  */
        header = (struct ether_header *) (net_kmsg (kmsg)->header);
        packet = (struct packet_header *) (net_kmsg (kmsg)->packet);
        memcpy (header, skb->data, sizeof (struct ether_header));

        /* packet is prefixed with a struct packet_header,
           see include/device/net_status.h.  */
        memcpy (packet + 1, skb->data + sizeof (struct ether_header),
                skb->len - sizeof (struct ether_header));
        packet->length = skb->len - sizeof (struct ether_header)
                         + sizeof (struct packet_header);
        packet->type = header->ether_type;
        net_kmsg (kmsg)->sent = TRUE; /* Mark packet as sent.  */
        s = splimp ();
        net_packet (&dev->net_data->ifnet, kmsg, packet->length,
                    ethernet_priority (kmsg));
        splx (s);
      }
  }

  return MIG_NO_REPLY;
}
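
The skblen computation near the top is the zero-copy decision in disguise: when the whole packet lies within the first page of the copy object's page list, the skb borrows that page in place and keeps skb->copy attached for free_skbuffs (Example #1) to discard later; otherwise the data is bounced into the skb and the copy object is discarded immediately. The predicate, restated as a sketch:

/* Restates the 'amt >= count' test above; illustrative only.  */
static inline boolean_t
packet_fits_first_page (vm_map_copy_t copy, unsigned int count)
{
  return (PAGE_SIZE - (copy->offset & PAGE_MASK)) >= count;
}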