Example #1
static void *
acpi_alloc_wakeup_handler(void)
{
	void		*wakeaddr;
	int		i;

	/*
	 * Specify the region for our wakeup code.  We want it in the low 1 MB
	 * region, excluding real mode IVT (0-0x3ff), BDA (0x400-0x4ff), EBDA
	 * (less than 128KB, below 0xa0000, must be excluded by SMAP and DSDT),
	 * and ROM area (0xa0000 and above).  The temporary page tables must be
	 * page-aligned.
	 */
	wakeaddr = contigmalloc((ACPI_PAGETABLES + 1) * PAGE_SIZE, M_DEVBUF,
	    M_WAITOK, 0x500, 0xa0000, PAGE_SIZE, 0ul);
	if (wakeaddr == NULL) {
		printf("%s: can't alloc wake memory\n", __func__);
		return (NULL);
	}
	if (EVENTHANDLER_REGISTER(power_resume, acpi_stop_beep, NULL,
	    EVENTHANDLER_PRI_LAST) == NULL) {
		printf("%s: can't register event handler\n", __func__);
		contigfree(wakeaddr, (ACPI_PAGETABLES + 1) * PAGE_SIZE,
		    M_DEVBUF);
		return (NULL);
	}
	susppcbs = malloc(mp_ncpus * sizeof(*susppcbs), M_DEVBUF, M_WAITOK);
	for (i = 0; i < mp_ncpus; i++) {
		susppcbs[i] = malloc(sizeof(**susppcbs), M_DEVBUF, M_WAITOK);
		susppcbs[i]->sp_fpususpend = alloc_fpusave(M_WAITOK);
	}

	return (wakeaddr);
}
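For symmetry, a minimal sketch of the matching teardown (the helper name acpi_free_wakeup_handler and its exact responsibilities are assumptions based on this example, not code from the tree):

/*
 * Hypothetical counterpart to acpi_alloc_wakeup_handler(): release the
 * wakeup region and the per-CPU suspend PCBs allocated above.
 */
static void
acpi_free_wakeup_handler(void *wakeaddr)
{
	int		i;

	if (wakeaddr != NULL)
		contigfree(wakeaddr, (ACPI_PAGETABLES + 1) * PAGE_SIZE,
		    M_DEVBUF);
	for (i = 0; i < mp_ncpus; i++)
		free(susppcbs[i], M_DEVBUF);	/* sp_fpususpend cleanup omitted */
	free(susppcbs, M_DEVBUF);
}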
Example #2
/*
 * svm_enable() - Called to enable SVM extensions on every processor.
 */
static int
svm_enable(void)
{
	uint64_t efer;
	int origcpu;
	int i;
	vm_paddr_t vm_hsave_pa;

	if (!svm_available)
		return (ENODEV);

	KKASSERT(svm_enabled == 0);

	/* Set EFER.SVME and allocate a VM Host Save Area on every cpu */
	origcpu = mycpuid;
	for (i = 0; i < ncpus; i++) {
		lwkt_migratecpu(i);

		efer = rdmsr(MSR_EFER);
		efer |= EFER_SVME;
		wrmsr(MSR_EFER, efer);

		vm_hsave_va[i] = (vm_offset_t) contigmalloc(4096, M_TEMP,
							    M_WAITOK | M_ZERO,
							    0, 0xffffffff,
							    4096, 0);
		vm_hsave_pa = vtophys(vm_hsave_va[i]);
		wrmsr(MSR_AMD_VM_HSAVE_PA, vm_hsave_pa);
	}
	lwkt_migratecpu(origcpu);

	svm_enabled = 1;

	return (0);
}
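The example does not show the disable side; a minimal sketch under the same assumptions (per-CPU lwkt_migratecpu() walk, clear EFER.SVME, release the host save area with contigfree()):

static void
svm_disable(void)
{
	uint64_t efer;
	int origcpu;
	int i;

	KKASSERT(svm_enabled != 0);

	origcpu = mycpuid;
	for (i = 0; i < ncpus; i++) {
		lwkt_migratecpu(i);

		/* Stop pointing at the host save area before clearing SVME. */
		wrmsr(MSR_AMD_VM_HSAVE_PA, 0);

		efer = rdmsr(MSR_EFER);
		efer &= ~EFER_SVME;
		wrmsr(MSR_EFER, efer);

		contigfree((void *)vm_hsave_va[i], 4096, M_TEMP);
		vm_hsave_va[i] = 0;
	}
	lwkt_migratecpu(origcpu);

	svm_enabled = 0;
}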
Example #3
static void
ofw_real_bounce_alloc(void *junk)
{
	/*
	 * Check that ofw_real is actually in use before allocating wads 
	 * of memory. Do this by checking if our mutex has been set up.
	 */
	if (!mtx_initialized(&of_bounce_mtx))
		return;

	/*
	 * Allocate contiguous, wired physical memory that fits into a 32-bit
	 * address space and can be accessed from real mode.
	 */

	mtx_lock(&of_bounce_mtx);

	of_bounce_virt = contigmalloc(4 * PAGE_SIZE, M_OFWREAL, 0, 0,
	    ulmin(platform_real_maxaddr(), BUS_SPACE_MAXADDR_32BIT), PAGE_SIZE,
	    4 * PAGE_SIZE);

	of_bounce_phys = vtophys(of_bounce_virt);
	of_bounce_size = 4 * PAGE_SIZE;

	/*
	 * For virtual-mode OF, direct map this physical address so that
	 * we have a 32-bit virtual address to give OF.
	 */

	if (!ofw_real_mode && !hw_direct_map) 
		pmap_kenter(of_bounce_phys, of_bounce_phys);

	mtx_unlock(&of_bounce_mtx);
}
Example #4
/*
 * Setup a DMA channel's bounce buffer.
 */
void
isa_dmainit(int chan, u_int bouncebufsize)
{
	void *buf;

#ifdef DIAGNOSTIC
	if (chan & ~VALID_DMA_MASK)
		panic("isa_dmainit: channel out of range");

	if (dma_bouncebuf[chan] != NULL)
		panic("isa_dmainit: impossible request"); 
#endif

	dma_bouncebufsize[chan] = bouncebufsize;

	/* Try malloc() first.  It works better if it works. */
	buf = kmalloc(bouncebufsize, M_DEVBUF, M_NOWAIT);
	if (buf != NULL) {
		if (isa_dmarangecheck(buf, bouncebufsize, chan) == 0) {
			dma_bouncebuf[chan] = buf;
			return;
		}
		kfree(buf, M_DEVBUF);
	}
	buf = contigmalloc(bouncebufsize, M_DEVBUF, M_NOWAIT, 0ul, 0xfffffful,
			   1ul, chan & 4 ? 0x20000ul : 0x10000ul);
	if (buf == NULL)
		kprintf("isa_dmainit(%d, %d) failed\n", chan, bouncebufsize);
	else
		dma_bouncebuf[chan] = buf;
}
Example #5
/**
 * @brief This method will be invoked to allocate memory dynamically.
 *
 * @param[in]  controller This parameter represents the controller
 *             object for which to allocate memory.
 * @param[out] mde This parameter represents the memory descriptor to
 *             be filled in by the user that will reference the newly
 *             allocated memory.
 *
 * @return none
 */
void scif_cb_controller_allocate_memory(SCI_CONTROLLER_HANDLE_T controller,
    SCI_PHYSICAL_MEMORY_DESCRIPTOR_T *mde)
{
	struct ISCI_CONTROLLER *isci_controller = (struct ISCI_CONTROLLER *)
	    sci_object_get_association(controller);

	/*
	 * Note this routine is only used for buffers needed to translate
	 * SCSI UNMAP commands to ATA DSM commands for SATA disks.
	 *
	 * We first try to pull a buffer from the controller's pool, and only
	 * call contigmalloc if one isn't there.
	 */
	if (!sci_pool_empty(isci_controller->unmap_buffer_pool)) {
		sci_pool_get(isci_controller->unmap_buffer_pool,
		    mde->virtual_address);
	} else
		mde->virtual_address = contigmalloc(PAGE_SIZE,
		    M_ISCI, M_NOWAIT, 0, BUS_SPACE_MAXADDR,
		    mde->constant_memory_alignment, 0);

	if (mde->virtual_address != NULL)
		bus_dmamap_load(isci_controller->buffer_dma_tag,
		    NULL, mde->virtual_address, PAGE_SIZE,
		    isci_single_map, &mde->physical_address,
		    BUS_DMA_NOWAIT);
}
Example #6
static int
ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	size_t xlat_size, buff_size;
	int rc;

	if (size == 0)
		return (EINVAL);

	xlat_size = roundup(size, mw->xlat_align_size);
	buff_size = xlat_size;

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return (0);

	if (mw->buff_size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = contigmalloc(mw->buff_size, M_NTB_IF, M_ZERO, 0,
	    mw->addr_limit, mw->xlat_align, 0);
	if (mw->virt_addr == NULL) {
		ntb_printf(0, "Unable to allocate MW buffer of size %zu/%zu\n",
		    mw->buff_size, mw->xlat_size);
		mw->xlat_size = 0;
		mw->buff_size = 0;
		return (ENOMEM);
	}
	/* TODO: replace with bus_space_* functions */
	mw->dma_addr = vtophys(mw->virt_addr);

	/*
	 * Ensure that the allocation from contigmalloc is aligned as
	 * requested.  XXX: This may not be needed -- brought in for parity
	 * with the Linux driver.
	 */
	if (mw->dma_addr % mw->xlat_align != 0) {
		ntb_printf(0,
		    "DMA memory 0x%jx not aligned to BAR size 0x%zx\n",
		    (uintmax_t)mw->dma_addr, size);
		ntb_free_mw(nt, num_mw);
		return (ENOMEM);
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ntb, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		ntb_printf(0, "Unable to set mw%d translation\n", num_mw);
		ntb_free_mw(nt, num_mw);
		return (rc);
	}

	return (0);
}
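ntb_free_mw() is called above but not shown; a minimal sketch of what it needs to undo (ntb_mw_clear_trans() is assumed here as the counterpart of ntb_mw_set_trans()):

static void
ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];

	if (mw->virt_addr == NULL)
		return;

	/* Assumed counterpart to ntb_mw_set_trans() used above. */
	ntb_mw_clear_trans(nt->ntb, num_mw);
	contigfree(mw->virt_addr, mw->buff_size, M_NTB_IF);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}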
Example #7
/*
 * Setup a DMA channel's bounce buffer.
 */
int
isa_dma_init(int chan, u_int bouncebufsize, int flag)
{
	void *buf;
	int contig;

#ifdef DIAGNOSTIC
	if (chan & ~VALID_DMA_MASK)
		panic("isa_dma_init: channel out of range");
#endif


	/* Try malloc() first.  It works better if it works. */
	buf = malloc(bouncebufsize, M_DEVBUF, flag);
	if (buf != NULL) {
		if (isa_dmarangecheck(buf, bouncebufsize, chan) != 0) {
			free(buf, M_DEVBUF);
			buf = NULL;
		}
		contig = 0;
	}

	if (buf == NULL) {
		buf = contigmalloc(bouncebufsize, M_DEVBUF, flag, 0ul, 0xfffffful,
			   1ul, chan & 4 ? 0x20000ul : 0x10000ul);
		contig = 1;
	}

	if (buf == NULL)
		return (ENOMEM);

	mtx_lock(&isa_dma_lock);
	/*
	 * If a DMA channel is shared, both drivers have to call isa_dma_init
	 * since they don't know that the other driver will do it.
	 * Just return if we're already set up good.
	 * XXX: this only works if they agree on the bouncebuf size.  This
	 * XXX: is typically the case since they are multiple instances of
	 * XXX: the same driver.
	 */
	if (dma_bouncebuf[chan] != NULL) {
		if (contig)
			contigfree(buf, bouncebufsize, M_DEVBUF);
		else
			free(buf, M_DEVBUF);
		mtx_unlock(&isa_dma_lock);
		return (0);
	}

	dma_bouncebufsize[chan] = bouncebufsize;
	dma_bouncebuf[chan] = buf;

	mtx_unlock(&isa_dma_lock);

	return (0);
}
Example #8
static int
dcons_drv_init(int stage)
{
	int size, size0, offset;

	if (drv_init)
		return(drv_init);

	drv_init = -1;

	dcons_bufsize = DCONS_BUF_SIZE;

#ifndef KLD_MODULE
	if (stage == 0) /* XXX or cold */
		/*
		 * DCONS_FORCE_CONSOLE == 1 and statically linked.
		 * called from cninit(); can't use contigmalloc yet.
		 */
		dcons_buf = (struct dcons_buf *) bssbuf;
	else
#endif
		/*
		 * DCONS_FORCE_CONSOLE == 0 or kernel module case.
		 * if the module is loaded after boot,
		 * dcons_buf could be non-contiguous.
		 */ 
		dcons_buf = (struct dcons_buf *) contigmalloc(dcons_bufsize,
			M_DEVBUF, 0, 0x10000, 0xffffffff, PAGE_SIZE, 0ul);

	offset = DCONS_HEADER_SIZE;
	size = (dcons_bufsize - offset);
	size0 = size * 3 / 4;

	dcons_init_port(0, offset, size0);
	offset += size0;
	dcons_init_port(1, offset, size - size0);
	dcons_buf->version = htonl(DCONS_VERSION);
	dcons_buf->magic = ntohl(DCONS_MAGIC);

#if DDB && DCONS_FORCE_GDB
#if CONS_NODEV
	gdbconsdev.cn_arg = (void *)&sc[DCONS_GDB];
#if __FreeBSD_version >= 501109
	sprintf(gdbconsdev.cn_name, "dgdb");
#endif
	gdb_arg = &gdbconsdev;
#else
	gdbdev = makedev(CDEV_MAJOR, DCONS_GDB);
#endif
	gdb_getc = dcons_cngetc;
	gdb_putc = dcons_cnputc;
#endif
	drv_init = 1;

	return 0;
}
Example #9
File: mca.c Project: MarginC/kame
void
ia64_mca_init(void)
{
	struct ia64_sal_result result;
	uint64_t max_size;
	char *p;
	int i;

	/*
	 * Get the sizes of the state information we can get from SAL and
	 * allocate a common block (forgive me my Fortran :-) for use by
	 * support functions. We create a region 7 address to make it
	 * easy on the OS_MCA or OS_INIT handlers to get the state info
	 * under unreliable conditions.
	 */
	max_size = 0;
	for (i = 0; i < SAL_INFO_TYPES; i++) {
		result = ia64_sal_entry(SAL_GET_STATE_INFO_SIZE, i, 0, 0, 0,
		    0, 0, 0);
		if (result.sal_status == 0) {
			mca_info_size[i] = result.sal_result[0];
			if (mca_info_size[i] > max_size)
				max_size = mca_info_size[i];
		} else
			mca_info_size[i] = -1;
	}
	max_size = round_page(max_size);

	p = contigmalloc(max_size, M_TEMP, M_WAITOK, 0ul, 256*1024*1024 - 1,
	    PAGE_SIZE, 256*1024*1024);

	mca_info_block = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)p));

	if (bootverbose)
		printf("MCA: allocated %ld bytes for state information\n",
		    max_size);

	/*
	 * Initialize the spin lock used to protect the info block. When APs
	 * get launched, there's a short moment of contention, but in all other
	 * cases it's not a hot spot. I think it's possible to have the MCA
	 * handler be called on multiple processors at the same time, but that
	 * should be rare. On top of that, performance is not an issue when
	 * dealing with machine checks...
	 */
	mtx_init(&mca_info_block_lock, "MCA spin lock", NULL, MTX_SPIN);

	/*
	 * Get and save any processor and platform error records. Note that in
	 * an SMP configuration the processor records are for the BSP only. We
	 * let the APs get and save their own records when we wake them up.
	 */
	for (i = 0; i < SAL_INFO_TYPES; i++)
		ia64_mca_save_state(i);
}
Example #10
static int
gscattach(struct isa_device *isdp)
{
  int unit = isdp->id_unit;
  struct gsc_unit *scu = unittab + unit;

  scu->flags |= FLAG_DEBUG;

  lprintf(("gsc%d.attach: "
	 "iobase 0x%03x, irq %d, drq %d, addr %p, size %d\n",
	 unit,
	 isdp->id_iobase,
	 isdp->id_irq,
	 isdp->id_drq,
	 isdp->id_maddr,
	 isdp->id_msize));

  printf("gsc%d: GeniScan GS-4500 at %ddpi\n",
	 unit, geomtab[scu->geometry].dpi);

  /*
   * Initialize buffer structure.
   * XXX this must be done early to give a good chance of getting a
   * contiguous buffer.  This wastes memory.
   */
  scu->sbuf.base = contigmalloc((unsigned long)MAX_BUFSIZE, M_DEVBUF, M_NOWAIT,
				0ul, 0xfffffful, 1ul, 0x10000ul);
  if ( scu->sbuf.base == NULL )
    {
      lprintf(("gsc%d.attach: buffer allocation failed\n", unit));
      return ATTACH_FAIL;	/* XXX attach must not fail */
    }
  scu->sbuf.size = INVALID;
  scu->sbuf.poi  = INVALID;

  scu->blen = DEFAULT_BLEN;
  scu->btime = TIMEOUT;

  scu->flags |= ATTACHED;
  lprintf(("gsc%d.attach: ok\n", unit));
  scu->flags &= ~FLAG_DEBUG;
#define GSC_UID 0
#define GSC_GID 13
  make_dev(&gsc_cdevsw, unit<<6, GSC_UID, GSC_GID, 0666, "gsc%d", unit);
  make_dev(&gsc_cdevsw, ((unit<<6) + FRMT_PBM),
     GSC_UID,  GSC_GID, 0666, "gsc%dp", unit);
  make_dev(&gsc_cdevsw, ((unit<<6) + DBUG_MASK),
     GSC_UID,  GSC_GID, 0666, "gsc%dd", unit);
  make_dev(&gsc_cdevsw, ((unit<<6) + DBUG_MASK+FRMT_PBM),
     GSC_UID,  GSC_GID, 0666, "gsc%dpd", unit);

  return ATTACH_SUCCESS;
}
Example #11
File: qp.c Project: Lxg1582/freebsd
static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = contigmalloc(sq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
	    4096, 0);

	if (sq->queue)
		sq->dma_addr = vtophys(sq->queue);
	else
		return -ENOMEM;
	sq->phys_addr = vtophys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	CTR4(KTR_IW_CXGBE, "%s sq %p dma_addr %p phys_addr %p", __func__,
	    sq->queue, sq->dma_addr, sq->phys_addr);
	return 0;
}
Example #12
/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

        *mapp = NULL;

	/* 
	 * XXX:
	 * (dmat->alignment < dmat->maxsize) is just a quick hack; the exact
	 * alignment guarantees of malloc need to be nailed down, and the
	 * code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll return an error if malloc gets it wrong.
	 */
	if (dmat->maxsize <= PAGE_SIZE &&
	    dmat->alignment < dmat->maxsize) {
		*vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags);
        } else {
                /*
                 * XXX Use Contigmalloc until it is merged into this facility
                 *     and handles multi-seg allocations.  Nobody is doing
                 *     multi-seg allocations yet though.
                 */
                *vaddr = contigmalloc(dmat->maxsize, M_DEVBUF, mflags,
                    0ul, dmat->lowaddr, dmat->alignment? dmat->alignment : 1ul,
                    dmat->boundary);
        }

        if (*vaddr == NULL)
                return (ENOMEM);

	if ((uintptr_t)*vaddr % dmat->alignment)
		printf("XXX: %s: alignment not respected!\n", __func__);

        return (0);
}
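The release path has to mirror the malloc-vs-contigmalloc decision made above; a minimal sketch (not necessarily the exact bus_dmamem_free() from any tree):

void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
	/*
	 * Free memory obtained by bus_dmamem_alloc() above, using the same
	 * size/alignment test that selected between malloc and contigmalloc.
	 */
	if (dmat->maxsize <= PAGE_SIZE && dmat->alignment < dmat->maxsize)
		free(vaddr, M_DEVBUF);
	else
		contigfree(vaddr, dmat->maxsize, M_DEVBUF);
}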
Example #13
static int
virtqueue_init_indirect(struct virtqueue *vq, int indirect_size)
{
	device_t dev;
	struct vq_desc_extra *dxp;
	int i, size;

	dev = vq->vq_dev;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_INDIRECT_DESC) == 0) {
		/*
		 * Indirect descriptors requested by the driver but not
		 * negotiated. Return zero to keep the initialization
		 * going: we'll run fine without.
		 */
		if (bootverbose)
			device_printf(dev, "virtqueue %d (%s) requested "
			    "indirect descriptors but not negotiated\n",
			    vq->vq_queue_index, vq->vq_name);
		return (0);
	}

	size = indirect_size * sizeof(struct vring_desc);
	vq->vq_max_indirect_size = indirect_size;
	vq->vq_indirect_mem_size = size;
	vq->vq_flags |= VIRTQUEUE_FLAG_INDIRECT;

	for (i = 0; i < vq->vq_nentries; i++) {
		dxp = &vq->vq_descx[i];

		dxp->indirect = contigmalloc(size, M_DEVBUF, M_WAITOK,
		    0, BUS_SPACE_MAXADDR, 16, 0);
		if (dxp->indirect == NULL) {
			device_printf(dev, "cannot allocate indirect list\n");
			return (ENOMEM);
		}

		dxp->indirect_paddr = vtophys(dxp->indirect);
		virtqueue_init_indirect_list(vq, dxp->indirect);
	}

	return (0);
}
Example #14
void *
oss_contig_malloc (unsigned long buffsize, unsigned long memlimit,
		   oss_native_word * phaddr)
{
  char *tmpbuf;
  *phaddr = 0;

  tmpbuf =
    (char *) contigmalloc (buffsize, M_DEVBUF, M_WAITOK, 0ul, memlimit,
			   PAGE_SIZE, 0ul);
  if (tmpbuf == NULL)
    {
      cmn_err (CE_CONT, "OSS: Unable to allocate %lu bytes for a DMA buffer\n",
	       buffsize);
      cmn_err (CE_CONT, "run soundoff and run soundon again.\n");
      return NULL;
    }
  *phaddr = vtophys (tmpbuf);
  return tmpbuf;
}
Example #15
/*
 * Common function for DMA-safe memory allocation.  May be called
 * by bus-specific DMA memory allocation functions.
 */
static int
nexus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags,
    bus_dmamap_t *mapp)
{
	int mflags;

	if (flags & BUS_DMA_NOWAIT)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;
	if (flags & BUS_DMA_ZERO)
		mflags |= M_ZERO;

	/*
	 * XXX:
	 * (dmat->dt_alignment < dmat->dt_maxsize) is just a quick hack; the
	 * exact alignment guarantees of malloc need to be nailed down, and
	 * the code below should be rewritten to take that into account.
	 *
	 * In the meantime, we'll warn the user if malloc gets it wrong.
	 */
	if (dmat->dt_maxsize <= PAGE_SIZE &&
	    dmat->dt_alignment < dmat->dt_maxsize)
		*vaddr = malloc(dmat->dt_maxsize, M_DEVBUF, mflags);
	else {
		/*
		 * XXX use contigmalloc until it is merged into this
		 * facility and handles multi-seg allocations.  Nobody
		 * is doing multi-seg allocations yet though.
		 */
		*vaddr = contigmalloc(dmat->dt_maxsize, M_DEVBUF, mflags,
		    0ul, dmat->dt_lowaddr,
		    dmat->dt_alignment ? dmat->dt_alignment : 1UL,
		    dmat->dt_boundary);
	}
	if (*vaddr == NULL)
		return (ENOMEM);
	if (vtophys(*vaddr) % dmat->dt_alignment)
		printf("%s: failed to align memory properly.\n", __func__);
	return (0);
}
Example #16
void *
x86bios_alloc(uint32_t *offset, size_t size, int flags)
{
	void *vaddr;
	int i;

	if (offset == NULL || size == 0)
		return (NULL);
	vaddr = contigmalloc(size, M_DEVBUF, flags, 0, X86BIOS_MEM_SIZE,
	    PAGE_SIZE, 0);
	if (vaddr != NULL) {
		*offset = vtophys(vaddr);
		mtx_lock(&x86bios_lock);
		for (i = 0; i < atop(round_page(size)); i++)
			vm86_addpage(&x86bios_vmc, atop(*offset) + i,
			    (vm_offset_t)vaddr + ptoa(i));
		mtx_unlock(&x86bios_lock);
	}

	return (vaddr);
}
Example #17
/*
 * Setup a DMA channel's bounce buffer.
 */
int
isa_dma_init(int chan, u_int bouncebufsize, int flag)
{
	void *buf;

#ifdef DIAGNOSTIC
	if (chan & ~VALID_DMA_MASK)
		panic("isa_dma_init: channel out of range");
	if (dma_bouncebuf[chan] != NULL)
		panic("isa_dma_init: impossible request"); 
#endif


	/* Try malloc() first.  It works better if it works. */
	buf = malloc(bouncebufsize, M_DEVBUF, flag);
	if (buf != NULL) {
		if (isa_dmarangecheck(buf, bouncebufsize, chan) != 0) {
			free(buf, M_DEVBUF);
			buf = NULL;
		}
	}

	if (buf == NULL) {
		buf = contigmalloc(bouncebufsize, M_DEVBUF, flag, 0ul, 0xfffffful,
			   1ul, chan & 4 ? 0x20000ul : 0x10000ul);
	}

	if (buf == NULL)
		return (ENOMEM);

	mtx_lock(&isa_dma_lock);

	dma_bouncebufsize[chan] = bouncebufsize;
	dma_bouncebuf[chan] = buf;

	mtx_unlock(&isa_dma_lock);

	return (0);
}
Example #18
static int
ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = size;

	mw->virt_addr = contigmalloc(mw->size, M_NTB_IF, M_ZERO, 0,
	    BUS_SPACE_MAXADDR, mw->size, 0);
	if (mw->virt_addr == NULL) {
		printf("ntb: Unable to allocate MW buffer of size %d\n",
		    (int)mw->size);
		return (ENOMEM);
	}
	/* TODO: replace with bus_space_* functions */
	mw->dma_addr = vtophys(mw->virt_addr);

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ntb, num_mw, mw->dma_addr);

	return (0);
}
Example #19
int
XX_MallocSmartInit(void)
{
	int error;

	error = E_OK;
	mtx_lock(&XX_MallocSmartLock);

	if (XX_MallocSmartPool)
		goto out;

	/* Allocate MallocSmart pool */
	XX_MallocSmartPool = contigmalloc(MALLOCSMART_POOL_SIZE, M_NETCOMMSW,
	    M_NOWAIT, 0, 0xFFFFFFFFFull, MALLOCSMART_POOL_SIZE, 0);
	if (!XX_MallocSmartPool) {
		error = E_NO_MEMORY;
		goto out;
	}

out:
	mtx_unlock(&XX_MallocSmartLock);
	return (error);
}
Example #20
static int
ps3pic_attach(device_t dev)
{
	struct ps3pic_softc *sc;
	uint64_t ppe;
	int thread;

	sc = device_get_softc(dev);

	sc->bitmap_thread0 = contigmalloc(128 /* 512 bits * 2 */, M_PS3PIC,
	    M_NOWAIT | M_ZERO, 0, BUS_SPACE_MAXADDR, 64 /* alignment */,
	    PAGE_SIZE /* boundary */);
	sc->mask_thread0 = sc->bitmap_thread0 + 4;
	sc->bitmap_thread1 = sc->bitmap_thread0 + 8;
	sc->mask_thread1 = sc->bitmap_thread0 + 12;

	lv1_get_logical_ppe_id(&ppe);
	thread = 32 - fls(mfctrl());
	lv1_configure_irq_state_bitmap(ppe, thread,
	    vtophys(sc->bitmap_thread0));
#ifdef SMP
	lv1_configure_irq_state_bitmap(ppe, !thread,
	    vtophys(sc->bitmap_thread1));

	/* Map both IPIs to the same VIRQ to avoid changes in intr_machdep */
	lv1_construct_event_receive_port(&sc->sc_ipi_outlet[0]);
	lv1_connect_irq_plug_ext(ppe, thread, sc->sc_ipi_outlet[0],
	    sc->sc_ipi_outlet[0], 0);
	lv1_construct_event_receive_port(&sc->sc_ipi_outlet[1]);
	lv1_connect_irq_plug_ext(ppe, !thread, sc->sc_ipi_outlet[0],
	    sc->sc_ipi_outlet[1], 0);
#endif

	powerpc_register_pic(dev, 0, sc->sc_ipi_outlet[0], 1, FALSE);
	return (0);
}
Example #21
/*
 * Net VSC initialize send buffer with net VSP
 */
static int 
hv_nv_init_send_buffer_with_net_vsp(struct hv_device *device)
{
	netvsc_dev *net_dev;
	nvsp_msg *init_pkt;
	int ret = 0;

	net_dev = hv_nv_get_outbound_net_device(device);
	if (!net_dev) {
		return (ENODEV);
	}

	net_dev->send_buf  = contigmalloc(net_dev->send_buf_size, M_NETVSC,
	    M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
	if (net_dev->send_buf == NULL) {
		ret = ENOMEM;
		goto cleanup;
	}

	/*
	 * Establish the gpadl handle for this buffer on this channel.
	 * Note:  This call uses the vmbus connection rather than the
	 * channel to establish the gpadl handle. 
	 */
	ret = hv_vmbus_channel_establish_gpadl(device->channel,
  	    net_dev->send_buf, net_dev->send_buf_size,
	    &net_dev->send_buf_gpadl_handle);
	if (ret != 0) {
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */

	init_pkt = &net_dev->channel_init_packet;

	memset(init_pkt, 0, sizeof(nvsp_msg));

	init_pkt->hdr.msg_type = nvsp_msg_1_type_send_send_buf;
	init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
	    net_dev->send_buf_gpadl_handle;
	init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
	    NETVSC_SEND_BUFFER_ID;

	/* Send the gpadl notification request */

	ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
  	    sizeof(nvsp_msg), (uint64_t)init_pkt,
	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		goto cleanup;
	}

	sema_wait(&net_dev->channel_init_sema);

	/* Check the response */
	if (init_pkt->msgs.vers_1_msgs.send_send_buf_complete.status
	    != nvsp_status_success) {
		ret = EINVAL;
		goto cleanup;
	}

	net_dev->send_section_size =
	    init_pkt->msgs.vers_1_msgs.send_send_buf_complete.section_size;
	net_dev->send_section_count =
	    net_dev->send_buf_size / net_dev->send_section_size;
	net_dev->bitsmap_words = howmany(net_dev->send_section_count,
	    BITS_PER_LONG);
	net_dev->send_section_bitsmap =
	    malloc(net_dev->bitsmap_words * sizeof(long), M_NETVSC,
	    M_WAITOK | M_ZERO);

	goto exit;

cleanup:
	hv_nv_destroy_send_buffer(net_dev);
	
exit:
	return (ret);
}
Example #22
int
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size, int align,
    vm_paddr_t highaddr, struct vq_alloc_info *info, struct virtqueue **vqp)
{
	struct virtqueue *vq;
	int error;

	*vqp = NULL;
	error = 0;

	if (size == 0) {
		device_printf(dev,
		    "virtqueue %d (%s) does not exist (size is zero)\n",
		    queue, info->vqai_name);
		return (ENODEV);
	} else if (!powerof2(size)) {
		device_printf(dev,
		    "virtqueue %d (%s) size is not a power of 2: %d\n",
		    queue, info->vqai_name, size);
		return (ENXIO);
	} else if (info->vqai_maxindirsz > VIRTIO_MAX_INDIRECT) {
		device_printf(dev, "virtqueue %d (%s) requested too many "
		    "indirect descriptors: %d, max %d\n",
		    queue, info->vqai_name, info->vqai_maxindirsz,
		    VIRTIO_MAX_INDIRECT);
		return (EINVAL);
	}

	vq = kmalloc(sizeof(struct virtqueue) +
	    size * sizeof(struct vq_desc_extra), M_DEVBUF, M_INTWAIT | M_ZERO);
	if (vq == NULL) {
		device_printf(dev, "cannot allocate virtqueue\n");
		return (ENOMEM);
	}

	vq->vq_dev = dev;
	strlcpy(vq->vq_name, info->vqai_name, sizeof(vq->vq_name));
	vq->vq_queue_index = queue;
	vq->vq_alignment = align;
	vq->vq_nentries = size;
	vq->vq_free_cnt = size;
	vq->vq_intrhand = info->vqai_intr;
	vq->vq_intrhand_arg = info->vqai_intr_arg;

	if (VIRTIO_BUS_WITH_FEATURE(dev, VIRTIO_RING_F_EVENT_IDX) != 0)
		vq->vq_flags |= VIRTQUEUE_FLAG_EVENT_IDX;

	if (info->vqai_maxindirsz > 1) {
		error = virtqueue_init_indirect(vq, info->vqai_maxindirsz);
		if (error)
			goto fail;
	}

	vq->vq_ring_size = round_page(vring_size(size, align));
	vq->vq_ring_mem = contigmalloc(vq->vq_ring_size, M_DEVBUF,
	    M_WAITOK | M_ZERO, 0, highaddr, PAGE_SIZE, 0);
	if (vq->vq_ring_mem == NULL) {
		device_printf(dev,
		    "cannot allocate memory for virtqueue ring\n");
		error = ENOMEM;
		goto fail;
	}

	vq_ring_init(vq);
	virtqueue_disable_intr(vq);

	*vqp = vq;

fail:
	if (error)
		virtqueue_free(vq);

	return (error);
}
Example #23
/* Attach the interface. Allocate softc structures */
static int
sln_attach(device_t dev)
{
	struct sln_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;
	unsigned char eaddr[ETHER_ADDR_LEN];
	int rid;
	int error = 0;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/* TODO: power state change */

	pci_enable_busmaster(dev);

	rid = SL_RID;
	sc->sln_res = bus_alloc_resource_any(dev, SL_RES, &rid, RF_ACTIVE);
	if (sc->sln_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}
	sc->sln_bustag = rman_get_bustag(sc->sln_res);
	sc->sln_bushandle = rman_get_bushandle(sc->sln_res);

	/* alloc pci irq */
	rid = 0;
	sc->sln_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sln_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		bus_release_resource(dev, SL_RES, SL_RID, sc->sln_res);
		error = ENXIO;
		goto fail;
	}

	/* Get MAC address */
	((uint32_t *)(&eaddr))[0] = be32toh(SLN_READ_4(sc, SL_MAC_ADDR0));
	((uint16_t *)(&eaddr))[2] = be16toh(SLN_READ_4(sc, SL_MAC_ADDR1));

	/* alloc rx buffer space */
	sc->sln_bufdata.sln_rx_buf = contigmalloc(SL_RX_BUFLEN,
	    M_DEVBUF, M_WAITOK, 0, 0xffffffff, PAGE_SIZE, 0);
	if (sc->sln_bufdata.sln_rx_buf == NULL) {
		device_printf(dev, "no memory for rx buffers!\n");
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sln_irq);
		bus_release_resource(dev, SL_RES, SL_RID, sc->sln_res);
		error = ENXIO;
		goto fail;
	}
	callout_init(&sc->sln_state);

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = sln_init;
	ifp->if_start = sln_tx;
	ifp->if_ioctl = sln_ioctl;
	ifp->if_watchdog = sln_watchdog;
	ifq_set_maxlen(&ifp->if_snd, IFQ_MAXLEN);
	ifq_set_ready(&ifp->if_snd);

	/* initial media */
	ifmedia_init(&sc->ifmedia, 0, sln_media_upd, sln_media_stat);

	/* supported media types */
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL);
	ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);

	/* Choose a default media. */
	ifmedia_set(&sc->ifmedia, IFM_ETHER | IFM_AUTO);

	ether_ifattach(ifp, eaddr, NULL);

	error = bus_setup_intr(dev, sc->sln_irq, INTR_MPSAFE, sln_interrupt, sc,
			       &sc->sln_intrhand, ifp->if_serializer);
	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sln_irq);
		bus_release_resource(dev, SL_RES, SL_RID, sc->sln_res);
		ether_ifdetach(ifp);
		device_printf(dev, "couldn't set up irq\n");
		goto fail;
	}

	ifp->if_cpuid = rman_get_cpuid(sc->sln_irq);
	KKASSERT(ifp->if_cpuid >= 0 && ifp->if_cpuid < ncpus);

	return 0;
fail:
	return error;
}
Example #24
static void
rtas_setup(void *junk)
{
	ihandle_t rtasi;
	cell_t rtas_size = 0, rtas_ptr;
	char path[31];
	int result;

	rtas = OF_finddevice("/rtas");
	if (rtas == -1) {
		rtas = 0;
		return;
	}
	OF_package_to_path(rtas, path, sizeof(path));
	rtasi = OF_open(path);
	if (rtasi == 0) {
		rtas = 0;
		printf("Error initializing RTAS: could not open node\n");
		return;
	}

	mtx_init(&rtas_mtx, "RTAS", MTX_DEF, 0);

	/* RTAS must be called with everything turned off in MSR */
	rtasmsr = mfmsr();
	rtasmsr &= ~(PSL_IR | PSL_DR | PSL_EE | PSL_SE);
	#ifdef __powerpc64__
	rtasmsr &= ~PSL_SF;
	#endif

	/*
	 * Allocate rtas_size + one page of contiguous, wired physical memory
	 * that can fit into a 32-bit address space and be accessed from real mode.
	 * This is used both to bounce arguments and for RTAS private data.
	 *
	 * It must be 4KB-aligned and not cross a 256 MB boundary.
	 */

	OF_getprop(rtas, "rtas-size", &rtas_size, sizeof(rtas_size));
	rtas_size = round_page(rtas_size);
	rtas_bounce_virt = contigmalloc(rtas_size + PAGE_SIZE, M_RTAS, 0, 0,
	    ulmin(platform_real_maxaddr(), BUS_SPACE_MAXADDR_32BIT),
	    4096, 256*1024*1024);

	rtas_private_data = vtophys(rtas_bounce_virt);
	rtas_bounce_virt += rtas_size;	/* Actual bounce area */
	rtas_bounce_phys = vtophys(rtas_bounce_virt);
	rtas_bounce_size = PAGE_SIZE;

	/*
	 * Instantiate RTAS. We always use the 32-bit version.
	 */

	result = OF_call_method("instantiate-rtas", rtasi, 1, 1,
	    (cell_t)rtas_private_data, &rtas_ptr);
	OF_close(rtasi);

	if (result != 0) {
		rtas = 0;
		rtas_ptr = 0;
		printf("Error initializing RTAS (%d)\n", result);
		return;
	}

	rtas_entry = (uintptr_t)(rtas_ptr);
}
Example #25
File: nlmrsa.c Project: JabirTech/Source
static int 
xlp_rsa_init(struct xlp_rsa_softc *sc, int node)
{
	struct xlp_rsa_command *cmd = NULL;
	uint32_t fbvc, dstvc, endsel, regval;
	struct nlm_fmn_msg m;
	int err, ret, i;
	uint64_t base;

	/* Register interrupt handler for the RSA/ECC CMS messages */
	if (register_msgring_handler(sc->rsaecc_vc_start,
	    sc->rsaecc_vc_end, nlm_xlprsaecc_msgring_handler, sc) != 0) {
		err = -1;
		printf("Couldn't register rsa/ecc msgring handler\n");
		goto errout;
	}
	fbvc = nlm_cpuid() * 4 + XLPGE_FB_VC;
	/* Do the CMS credit initialization */
	/* Currently it is configured by default to 50 when the kernel comes up */

#if BYTE_ORDER == LITTLE_ENDIAN
	for (i = 0; i < nitems(nlm_rsa_ucode_data); i++)
		nlm_rsa_ucode_data[i] = htobe64(nlm_rsa_ucode_data[i]);
#endif
	for (dstvc = sc->rsaecc_vc_start; dstvc <= sc->rsaecc_vc_end; dstvc++) {
		cmd = malloc(sizeof(struct xlp_rsa_command), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		KASSERT(cmd != NULL, ("%s:cmd is NULL\n", __func__));
		cmd->rsasrc = contigmalloc(sizeof(nlm_rsa_ucode_data),
		    M_DEVBUF,
		    (M_WAITOK | M_ZERO),
		    0UL /* low address */, -1UL /* high address */,
		    XLP_L2L3_CACHELINE_SIZE /* alignment */,
		    0UL /* boundary */);
		KASSERT(cmd->rsasrc != NULL,
		    ("%s:cmd->rsasrc is NULL\n", __func__));
		memcpy(cmd->rsasrc, nlm_rsa_ucode_data,
		    sizeof(nlm_rsa_ucode_data));
		m.msg[0] = nlm_crypto_form_rsa_ecc_fmn_entry0(1, 0x70, 0,
		    vtophys(cmd->rsasrc));
		m.msg[1] = nlm_crypto_form_rsa_ecc_fmn_entry1(0, 1, fbvc,
		    vtophys(cmd->rsasrc));
		/* Software scratch pad */
		m.msg[2] = (uintptr_t)cmd;
		m.msg[3] = 0;

		ret = nlm_fmn_msgsend(dstvc, 3, FMN_SWCODE_RSA, &m);
		if (ret != 0) {
			err = -1;
			printf("%s: msgsnd failed (%x)\n", __func__, ret);
			goto errout;
		}
	}
	/* Configure so that all VCs send request to all RSA pipes */
	base = nlm_get_rsa_regbase(node);
	if (nlm_is_xlp3xx()) {
		endsel = 1;
		regval = 0xFFFF;
	} else {
		endsel = 3;
		regval = 0x07FFFFFF;
	}
	for (i = 0; i < endsel; i++)
		nlm_write_rsa_reg(base, RSA_ENG_SEL_0 + i, regval);
	return (0);
errout:
	xlp_free_cmd_params(cmd);
	return (err);
}
Example #26
static int
dcons_drv_init(int stage)
{
#if defined(__i386__) || defined(__amd64__)
	quad_t addr, size;
#endif

	if (drv_init)
		return(drv_init);

	drv_init = -1;

	bzero(&dg, sizeof(dg));
	dcons_conf = &dg;
	dg.cdev = &dcons_consdev;
	dg.buf = NULL;
	dg.size = DCONS_BUF_SIZE;

#if defined(__i386__) || defined(__amd64__)
	if (getenv_quad("dcons.addr", &addr) > 0 &&
	    getenv_quad("dcons.size", &size) > 0) {
#ifdef __i386__
		vm_paddr_t pa;
		/*
		 * Allow read/write access to dcons buffer.
		 */
		for (pa = trunc_page(addr); pa < addr + size; pa += PAGE_SIZE)
			*vtopte(KERNBASE + pa) |= PG_RW;
		invltlb();
#endif
		/* XXX P to V */
		dg.buf = (struct dcons_buf *)(vm_offset_t)(KERNBASE + addr);
		dg.size = size;
		if (dcons_load_buffer(dg.buf, dg.size, sc) < 0)
			dg.buf = NULL;
	}
#endif
	if (dg.buf != NULL)
		goto ok;

#ifndef KLD_MODULE
	if (stage == 0) { /* XXX or cold */
		/*
		 * DCONS_FORCE_CONSOLE == 1 and statically linked.
		 * called from cninit(); can't use contigmalloc yet.
		 */
		dg.buf = (struct dcons_buf *) bssbuf;
		dcons_init(dg.buf, dg.size, sc);
	} else
#endif
	{
		/*
		 * DCONS_FORCE_CONSOLE == 0 or kernel module case.
		 * if the module is loaded after boot,
		 * bssbuf could be non-contiguous.
		 */ 
		dg.buf = (struct dcons_buf *) contigmalloc(dg.size,
			M_DEVBUF, 0, 0x10000, 0xffffffff, PAGE_SIZE, 0ul);
		if (dg.buf == NULL)
			return (-1);
		dcons_init(dg.buf, dg.size, sc);
	}

ok:
	dcons_buf = dg.buf;

	drv_init = 1;

	return 0;
}
Example #27
/*
 * Maybe we'd better use bus_dmamem_alloc to allocate DMA memory,
 * but there are some problems currently (alignment, etc.).
 */
static __inline void *__get_free_pages(int order)
{
	/* don't use low memory - other devices may get starved */
	return contigmalloc(PAGE_SIZE<<order, 
			M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}
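The matching free routine is not shown; a minimal sketch (the __free_pages name mirrors the Linux API this shim emulates and is an assumption here):

static __inline void __free_pages(void *ptr, int order)
{
	/* Release memory obtained from __get_free_pages() above. */
	contigfree(ptr, PAGE_SIZE << order, M_DEVBUF);
}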
Example #28
/*
 * Net VSC initialize receive buffer with net VSP
 * 
 * Net VSP:  Network virtual services client, also known as the
 *     Hyper-V extensible switch and the synthetic data path.
 */
static int 
hv_nv_init_rx_buffer_with_net_vsp(struct hv_device *device)
{
	netvsc_dev *net_dev;
	nvsp_msg *init_pkt;
	int ret = 0;

	net_dev = hv_nv_get_outbound_net_device(device);
	if (!net_dev) {
		return (ENODEV);
	}

	net_dev->rx_buf = contigmalloc(net_dev->rx_buf_size, M_NETVSC,
	    M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);

	/*
	 * Establish the GPADL handle for this buffer on this channel.
	 * Note:  This call uses the vmbus connection rather than the
	 * channel to establish the gpadl handle. 
	 * GPADL:  Guest physical address descriptor list.
	 */
	ret = hv_vmbus_channel_establish_gpadl(
		device->channel, net_dev->rx_buf,
		net_dev->rx_buf_size, &net_dev->rx_buf_gpadl_handle);
	if (ret != 0) {
		goto cleanup;
	}
	
	/* sema_wait(&ext->channel_init_sema); KYS CHECK */

	/* Notify the NetVsp of the gpadl handle */
	init_pkt = &net_dev->channel_init_packet;

	memset(init_pkt, 0, sizeof(nvsp_msg));

	init_pkt->hdr.msg_type = nvsp_msg_1_type_send_rx_buf;
	init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
	    net_dev->rx_buf_gpadl_handle;
	init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
	    NETVSC_RECEIVE_BUFFER_ID;

	/* Send the gpadl notification request */

	ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
	    sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		goto cleanup;
	}

	sema_wait(&net_dev->channel_init_sema);

	/* Check the response */
	if (init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.status
	    != nvsp_status_success) {
		ret = EINVAL;
		goto cleanup;
	}

	net_dev->rx_section_count =
	    init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.num_sections;

	net_dev->rx_sections = malloc(net_dev->rx_section_count *
	    sizeof(nvsp_1_rx_buf_section), M_NETVSC, M_WAITOK);
	memcpy(net_dev->rx_sections, 
	    init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.sections,
	    net_dev->rx_section_count * sizeof(nvsp_1_rx_buf_section));


	/*
	 * For first release, there should only be 1 section that represents
	 * the entire receive buffer
	 */
	if (net_dev->rx_section_count != 1
	    || net_dev->rx_sections->offset != 0) {
		ret = EINVAL;
		goto cleanup;
	}

	goto exit;

cleanup:
	hv_nv_destroy_rx_buffer(net_dev);
	
exit:
	return (ret);
}
Example #29
unsigned long
n8_GetLargeAllocation(N8_MemoryType_t bankIndex,
                      unsigned long size, unsigned char debug)
{

    NspInstance_t	*nip  = &NSPDeviceTable_g[0];	/* can only attach once */
    struct nsp_softc *sc = (struct nsp_softc *)nip->dev;

    bus_dma_segment_t seg;
    int rseg;
    void *kva = NULL;

#if 0
    /* Replacement for: */
    m = contigmalloc(size, M_DEVBUF, M_WAITOK,
                     0, 		/* lower acceptable phys addr	*/
                     0xffffffff,	/* upper acceptable phys addr	*/
                     PAGE_SIZE,		/* alignment			*/
                     0);		/* boundary			*/
#endif
    if (bus_dmamem_alloc(sc->dma_tag, size, PAGE_SIZE, 0,
                         &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
        printf("%s: can't alloc DMA buffer\n", sc->device.dv_xname);
        return 0;
    }
    if (bus_dmamem_map(sc->dma_tag, &seg, rseg, size, &kva,
                       BUS_DMA_NOWAIT)) {
        printf("%s: can't map DMA buffers (%lu bytes)\n", sc->device.dv_xname,
               size);
        bus_dmamem_free(sc->dma_tag, &seg, rseg);
        return 0;
    }
    if (bus_dmamap_create(sc->dma_tag, size, 1,
                          size, 0, BUS_DMA_NOWAIT, &DmaMap_g[bankIndex])) {
        printf("%s: can't create DMA map\n", sc->device.dv_xname);
        bus_dmamem_unmap(sc->dma_tag, kva, size);
        bus_dmamem_free(sc->dma_tag, &seg, rseg);
        return 0;
    }
    if (bus_dmamap_load(sc->dma_tag, DmaMap_g[bankIndex], kva, size,
                        NULL, BUS_DMA_NOWAIT)) {
        printf("%s: can't load DMA map\n", sc->device.dv_xname);
        bus_dmamap_destroy(sc->dma_tag, DmaMap_g[bankIndex]);
        bus_dmamem_unmap(sc->dma_tag, kva, size);
        bus_dmamem_free(sc->dma_tag, &seg, rseg);
        return 0;
    }
    if (kva) {
        /* bzero(kva, size) */
        BasePointer_g[bankIndex]    = kva;
        MemSize_g[bankIndex]        = size;
        Seg_g[bankIndex]            = seg;
        Rseg_g[bankIndex]           = rseg;
        MemBaseAddress_g[bankIndex] = vtophys((u_int)kva);
        MemTopAddress_g[bankIndex]  = MemBaseAddress_g[bankIndex] + size;
    }

    if (debug)
    {
        printf("n8_GetLargeAllocation: %p (0x%08lx) allocated for bankIndex %d\n",
               BasePointer_g[bankIndex], MemBaseAddress_g[bankIndex], bankIndex);
    }

    return MemBaseAddress_g[bankIndex];
}
Example #30
File: qp.c Project: Lxg1582/freebsd
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct adapter *sc = rdev->adap;
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret;
	int eqsize;
	struct wrqe *wr;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid)
		goto err1;

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq)
			goto err2;

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq)
			goto err3;
	}

	/* RQT must be a power of 2. */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr)
		goto err4;

	if (alloc_host_sq(rdev, &wq->sq))
		goto err5;

	memset(wq->sq.queue, 0, wq->sq.memsize);
	pci_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = contigmalloc(wq->rq.memsize,
            M_DEVBUF, M_NOWAIT, 0ul, ~0ul, 4096, 0);
        if (wq->rq.queue)
                wq->rq.dma_addr = vtophys(wq->rq.queue);
        else
                goto err6;
	CTR5(KTR_IW_CXGBE,
	    "%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx", __func__,
	    wq->sq.queue, (unsigned long long)vtophys(wq->sq.queue),
	    wq->rq.queue, (unsigned long long)vtophys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
	    MYPF_REG(SGE_PF_KDOORBELL));
	wq->gts = (void *)((unsigned long)rman_get_virtual(rdev->adap->regs_res)
			   + MYPF_REG(SGE_PF_GTS));
	if (user) {
		wq->sq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
						(wq->sq.qid << rdev->qpshift));
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
						(wq->rq.qid << rdev->qpshift));
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
        if (wr == NULL)
		return (0);
        res_wr = wrtod(wr);

	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			V_FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			F_FW_WR_COMPL);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + spg_creds;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/* eqsize is the number of 64B entries plus the status page size. */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + spg_creds ;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	t4_wrq_tx(sc, wr);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto err7;

	CTR6(KTR_IW_CXGBE,
	    "%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx",
	    __func__, wq->sq.qid, wq->rq.qid, wq->db,
	    (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
err7:
	contigfree(wq->rq.queue, wq->rq.memsize, M_DEVBUF);
err6:
	dealloc_sq(rdev, &wq->sq);
err5:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
err4:
	kfree(wq->rq.sw_rq);
err3:
	kfree(wq->sq.sw_sq);
err2:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
err1:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return -ENOMEM;
}